var bibbase_data = {"data":"\"Loading..\"\n\n
\n\n \n\n \n\n \n \n\n \n\n \n \n\n \n\n \n
\n generated by\n \n \"bibbase.org\"\n\n \n
\n \n\n
\n\n \n\n\n
\n\n Excellent! Next you can\n create a new website with this list, or\n embed it in an existing web page by copying & pasting\n any of the following snippets.\n\n
\n JavaScript\n (easiest)\n
\n \n <script src=\"https://bibbase.org/show?bib=https%3A%2F%2Fwww.dropbox.com%2Fs%2Fx2bon6vqlc79ekc%2FFigueroaPublications.bib%3Fdl%3D1&jsonp=1&[owner]=[Figueroa,%20N.]&jsonp=1\"></script>\n \n
\n\n PHP\n
\n \n <?php\n $contents = file_get_contents(\"https://bibbase.org/show?bib=https%3A%2F%2Fwww.dropbox.com%2Fs%2Fx2bon6vqlc79ekc%2FFigueroaPublications.bib%3Fdl%3D1&jsonp=1&[owner]=[Figueroa,%20N.]\");\n print_r($contents);\n ?>\n \n
\n\n iFrame\n (not recommended)\n
\n \n <iframe src=\"https://bibbase.org/show?bib=https%3A%2F%2Fwww.dropbox.com%2Fs%2Fx2bon6vqlc79ekc%2FFigueroaPublications.bib%3Fdl%3D1&jsonp=1&[owner]=[Figueroa,%20N.]\"></iframe>\n \n
\n\n

\n For more details see the documention.\n

\n
\n
\n\n
\n\n This is a preview! To use this list on your own web site\n or create a new web site from it,\n create a free account. The file will be added\n and you will be able to edit it in the File Manager.\n We will show you instructions once you've created your account.\n
\n\n
\n\n

To the site owner:

\n\n

Action required! Mendeley is changing its\n API. In order to keep using Mendeley with BibBase past April\n 14th, you need to:\n

    \n
  1. renew the authorization for BibBase on Mendeley, and
  2. \n
  3. update the BibBase URL\n in your page the same way you did when you initially set up\n this page.\n
  4. \n
\n

\n\n

\n \n \n Fix it now\n

\n
\n\n
\n\n\n
\n \n \n
\n
\n  \n 2024\n \n \n (9)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n On-Manifold Strategies for Reactive Dynamical System Modulation with Non-Convex Obstacles.\n \n \n \n \n\n\n \n Fourie, C. K.; Figueroa, N.; and Shah, J. A.\n\n\n \n\n\n\n IEEE Transactions on Robotics (TRO),1-20. 2024.\n \n\n\n\n
\n\n\n\n \n \n \"On-ManifoldPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{10473149,\n  author={Fourie, Christopher K. and Figueroa, Nadia and Shah, Julie A.},\n  journal={IEEE Transactions on Robotics (TRO)}, \n  title={On-Manifold Strategies for Reactive Dynamical System Modulation with Non-Convex Obstacles}, \n  year={2024},\n  volume={},\n  number={},\n  pages={1-20},\n  doi={10.1109/TRO.2024.3378179},\n  url={https://ieeexplore.ieee.org/document/10473149}}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Directionality-Aware Mixture Model Parallel Sampling for Efficient Linear Parameter Varying Dynamical System Learning.\n \n \n \n\n\n \n Sun, S.; Gao, H.; Li, T.; and Figueroa, N.\n\n\n \n\n\n\n IEEE Robotics and Automation Letters (RA-L) [Conditionally Accepted]. 2024.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{sun2024directionalityaware,\n  author={Sunan Sun and Haihui Gao and Tianyu Li and Nadia Figueroa},\n  journal={IEEE Robotics and Automation Letters (RA-L) [Conditionally Accepted]}, \n  title={Directionality-Aware Mixture Model Parallel Sampling for Efficient Linear Parameter Varying Dynamical System Learning}, \n  year={2024},\n  volume={},\n  number={},\n  pages={},\n  doi={}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Reactive collision-free motion generation in joint space via dynamical systems and sampling-based MPC.\n \n \n \n \n\n\n \n Koptev, M.; Figueroa, N.; and Billard, A.\n\n\n \n\n\n\n The International Journal of Robotics Research, 0(0): 0. 2024.\n \n\n\n\n
\n\n\n\n \n \n \"ReactivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{koptev2024ijrr,\nauthor = {Mikhail Koptev and Nadia Figueroa and Aude Billard},\ntitle ={Reactive collision-free motion generation in joint space via dynamical systems and sampling-based MPC},\njournal = {The International Journal of Robotics Research},\nvolume = {0},\nnumber = {0},\npages = {0},\nyear = {2024},\ndoi = {10.1177/02783649241246557},\nURL = {https://doi.org/10.1177/02783649241246557},\neprint = {https://doi.org/10.1177/02783649241246557},\nabstract = { Dynamical system (DS) based motion planning offers collision-free motion, with closed-loop reactivity thanks to their analytical expression. It ensures that obstacles are not penetrated by reshaping a nominal DS through matrix modulation, which is constructed using continuously differentiable obstacle representations. However, state-of-the-art approaches may suffer from local minima induced by non-convex obstacles, thus failing to scale to complex, high-dimensional joint spaces. On the other hand, sampling-based Model Predictive Control (MPC) techniques provide feasible collision-free paths in joint-space, yet are limited to quasi-reactive scenarios due to computational complexity that grows cubically with space dimensionality and horizon length. To control the robot in the cluttered environment with moving obstacles, and to generate feasible and highly reactive collision-free motion in robots’ joint space, we present an approach for modulating joint-space DS using sampling-based MPC. Specifically, a nominal DS representing an unconstrained desired joint space motion to a target is locally deflected with obstacle-tangential velocity components navigating the robot around obstacles and avoiding local minima. Such tangential velocity components are constructed from receding horizon collision-free paths generated asynchronously by the sampling-based MPC. Notably, the MPC is not required to run constantly, but only activated when the local minima is detected. 
The approach is validated in simulation and real-world experiments on a 7-DoF robot demonstrating the capability of avoiding concave obstacles, while maintaining local attractor stability in both quasi-static and highly dynamic cluttered environments.}}\n\n\n
\n
\n\n\n
\n Dynamical system (DS) based motion planning offers collision-free motion, with closed-loop reactivity thanks to their analytical expression. It ensures that obstacles are not penetrated by reshaping a nominal DS through matrix modulation, which is constructed using continuously differentiable obstacle representations. However, state-of-the-art approaches may suffer from local minima induced by non-convex obstacles, thus failing to scale to complex, high-dimensional joint spaces. On the other hand, sampling-based Model Predictive Control (MPC) techniques provide feasible collision-free paths in joint-space, yet are limited to quasi-reactive scenarios due to computational complexity that grows cubically with space dimensionality and horizon length. To control the robot in the cluttered environment with moving obstacles, and to generate feasible and highly reactive collision-free motion in robots’ joint space, we present an approach for modulating joint-space DS using sampling-based MPC. Specifically, a nominal DS representing an unconstrained desired joint space motion to a target is locally deflected with obstacle-tangential velocity components navigating the robot around obstacles and avoiding local minima. Such tangential velocity components are constructed from receding horizon collision-free paths generated asynchronously by the sampling-based MPC. Notably, the MPC is not required to run constantly, but only activated when the local minima is detected. The approach is validated in simulation and real-world experiments on a 7-DoF robot demonstrating the capability of avoiding concave obstacles, while maintaining local attractor stability in both quasi-static and highly dynamic cluttered environments.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n On the Feasibility of EEG-based Motor Intention Detection for Real-Time Robot Assistive Control.\n \n \n \n \n\n\n \n Choi, H. J.; Das, S.; Peng, S.; Bajcsy, R.; and Figueroa, N.\n\n\n \n\n\n\n In IEEE International Conference on Robotics and Automation (ICRA), 2024. \n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{choi2024eeg,\n  author={Ho Jin Choi and Satyajeet Das and Shaoting Peng and Ruzena Bajcsy and Nadia Figueroa},\n  booktitle={IEEE International Conference on Robotics and Automation (ICRA)}, \n  title={On the Feasibility of EEG-based Motor Intention Detection for Real-Time Robot Assistive Control}, \n  year={2024},\n  volume={},\n  number={},\n  pages={},\n  url={https://arxiv.org/abs/2403.08149},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Towards Feasible Dynamic Grasping: Leveraging Gaussian Process Distance Fields, SE(3) Equivariance and Riemannian Mixture Models.\n \n \n \n \n\n\n \n Choi, H. J.; and Figueroa, N.\n\n\n \n\n\n\n In IEEE International Conference on Robotics and Automation (ICRA), 2024. \n \n\n\n\n
\n\n\n\n \n \n \"TowardsPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{choi2024grasping,\n  author={Ho Jin Choi and Nadia Figueroa},\n  booktitle={IEEE International Conference on Robotics and Automation (ICRA)}, \n  title={Towards Feasible Dynamic Grasping: Leveraging Gaussian Process Distance Fields, SE(3) Equivariance and Riemannian Mixture Models}, \n  year={2024},\n  volume={},\n  number={},\n  pages={},\n  url={https://arxiv.org/abs/2311.02576},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Constrained Passive Interaction Control: Leveraging Passivity and Safety for Robot Manipulators.\n \n \n \n \n\n\n \n Zhang, Z.; Li, T.; and Figueroa, N.\n\n\n \n\n\n\n In IEEE International Conference on Robotics and Automation (ICRA), 2024. \n \n\n\n\n
\n\n\n\n \n \n \"ConstrainedPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{zhang2024cpic,\n  author={Zhiquan Zhang and Tianyu Li and Nadia Figueroa},\n  booktitle={IEEE International Conference on Robotics and Automation (ICRA)}, \n  title={Constrained Passive Interaction Control: Leveraging Passivity and Safety for Robot Manipulators}, \n  year={2024},\n  volume={},\n  number={},\n  pages={},\n  url={https://arxiv.org/abs/2403.09853},\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning Complex Motion Plans using Neural ODEs with Safety and Stability Guarantees.\n \n \n \n \n\n\n \n Nawaz, F.; Li, T.; Matni, N.; and Figueroa, N.\n\n\n \n\n\n\n In IEEE International Conference on Robotics and Automation (ICRA), 2024. \n \n\n\n\n
\n\n\n\n \n \n \"LearningPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{nawaz2024cbfnode,\n  author={Farhad Nawaz and Tianyu Li and Nikolai Matni and Nadia Figueroa},\n  booktitle={IEEE International Conference on Robotics and Automation (ICRA)}, \n  title={Learning Complex Motion Plans using Neural ODEs with Safety and Stability Guarantees}, \n  year={2024},\n  volume={},\n  number={},\n  pages={},\n  url={https://arxiv.org/abs/2308.00186},\n}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Object Permanence Filters for Robust Tracking with Interactive Robots.\n \n \n \n \n\n\n \n Peng, S.; Wang, M. X.; Shah, J.; and Figueroa, N.\n\n\n \n\n\n\n In IEEE International Conference on Robotics and Automation (ICRA), 2024. \n \n\n\n\n
\n\n\n\n \n \n \"ObjectPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{peng2024opf,\n  author={Shaoting Peng and Margaret X. Wang and Julie Shah and Nadia Figueroa},\n  booktitle={IEEE International Conference on Robotics and Automation (ICRA)}, \n  title={Object Permanence Filters for Robust Tracking with Interactive Robots}, \n  year={2024},\n  volume={},\n  number={},\n  pages={},\n  url={https://arxiv.org/abs/2403.08231},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Neural Contractive Dynamical Systems.\n \n \n \n \n\n\n \n Beik-Mohammadi, H.; Hauberg, S.; Arvanitidis, G.; Figueroa, N.; Neumann, G.; and Rozo, L.\n\n\n \n\n\n\n In The Twelfth International Conference on Learning Representations (ICLR), 2024. \n \n\n\n\n
\n\n\n\n \n \n \"NeuralPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{beik-mohammadi2024ncds,\ntitle={Neural Contractive Dynamical Systems},\nauthor={Hadi Beik-Mohammadi and Søren Hauberg and Georgios Arvanitidis and Nadia Figueroa and Gerhard Neumann and Leonel Rozo},\nbooktitle={The Twelfth International Conference on Learning Representations (ICLR)},\nyear={2024},\nurl={https://openreview.net/forum?id=iAYIRHOYy8}\n}\n\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2023\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Neural Joint Space Implicit Signed Distance Functions for Reactive Robot Manipulator Control.\n \n \n \n\n\n \n Koptev, M.; Figueroa, N.; and Billard, A.\n\n\n \n\n\n\n IEEE Robotics and Automation Letters (RA-L), 8(2): 480-487. 2023.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{9976191,\n  author={Koptev, Mikhail and Figueroa, Nadia and Billard, Aude},\n  journal={IEEE Robotics and Automation Letters (RA-L)}, \n  title={Neural Joint Space Implicit Signed Distance Functions for Reactive Robot Manipulator Control}, \n  year={2023},\n  volume={8},\n  number={2},\n  pages={480-487},\n  doi={10.1109/LRA.2022.3227860}}\n\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning Realistic Joint Space Boundaries for Range of Motion Analysis of Healthy and Impaired Human Arms.\n \n \n \n \n\n\n \n Keyvanian, S.; Johnson, M. J.; and Figueroa, N.\n\n\n \n\n\n\n In 2023 IEEE-RAS 22nd International Conference on Humanoid Robots (Humanoids), pages 1-8, 2023. \n \n\n\n\n
\n\n\n\n \n \n \"LearningPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{10375147,\n  author={Keyvanian, Shafagh and Johnson, Michelle J. and Figueroa, Nadia},\n  booktitle={2023 IEEE-RAS 22nd International Conference on Humanoid Robots (Humanoids)}, \n  title={Learning Realistic Joint Space Boundaries for Range of Motion Analysis of Healthy and Impaired Human Arms}, \n  year={2023},\n  volume={},\n  number={},\n  pages={1-8},\n  url={https://sites.google.com/seas.upenn.edu/learning-rom/home},}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Task Transfer with Stability Guarantees via Elastic Dynamical System Motion Policies.\n \n \n \n \n\n\n \n Li, T.; and Figueroa, N.\n\n\n \n\n\n\n In 7th Annual Conference on Robot Learning (CORL)., 2023. \n \n\n\n\n
\n\n\n\n \n \n \"TaskPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{li2023task,\ntitle={Task Transfer with Stability Guarantees via Elastic Dynamical System Motion Policies},\nauthor={Tianyu Li and Nadia Figueroa},\nbooktitle={7th Annual Conference on Robot Learning (CORL).},\nyear={2023},\nurl={https://openreview.net/forum?id=8scj3Y0RLq},\nlink={https://openreview.net/forum?id=8scj3Y0RLq}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Ultrafast, Programmable, and Electronics-Free Soft Robots Enabled by Snapping Metacaps.\n \n \n \n \n\n\n \n Jin, L.; Yang, Y.; Maldonado, B. O. T.; Lee, S. D.; Figueroa, N.; Full, R. J.; and Yang, S.\n\n\n \n\n\n\n Advanced Intelligent Systems, n/a(n/a): 2300039. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"Ultrafast,Paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{https://doi.org/10.1002/aisy.202300039,\nauthor = {Jin, Lishuai and Yang, Yueying and Maldonado, Bryan O. Torres and Lee, Sebastian David and Figueroa, Nadia and Full, Robert J. and Yang, Shu},\ntitle = {Ultrafast, Programmable, and Electronics-Free Soft Robots Enabled by Snapping Metacaps},\njournal = {Advanced Intelligent Systems},\nvolume = {n/a},\nnumber = {n/a},\npages = {2300039},\nyear = {2023},\nkeywords = {bistability, electronics-free, mechanical metacaps, snap-through, ultrafast grippers},\ndoi = {https://doi.org/10.1002/aisy.202300039},\nurl = {https://onlinelibrary.wiley.com/doi/abs/10.1002/aisy.202300039},\neprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1002/aisy.202300039},\nabstract = {Soft robots offer a myriad of potential because of their intrinsically compliant bodies, enabling safe interactions with humans and adaptability to unpredictable environments. However, most of them have limited actuation speeds, require complex control systems, and lack sensing capabilities. To address these challenges, herein, a class of metacaps is geometrically designed by introducing an array of ribs to a spherical cap with programmable bistabilities and snapping behaviors, enabling several unprecedented soft robotic functionalities. Specifically, a centimeter-sized, sensor-less metacap gripper is demonstrated that can grasp objects in 3.75 ms upon physical contact or pneumatic actuation with tunable behaviors that have little dependence on the rate of input. The grippers can be readily integrated into a robotic platform for practical applications. The metacap can further enable propelling of a swimming robot, exhibiting amplified swimming speed as well as untethered, electronics-free swimming with tunable speeds using an oscillating valve. The metacap designs provide new strategies to enable the next-generation soft robots to achieve high transient output energy and autonomous and electronics-free maneuvering.}\n}\n\n
\n
\n\n\n
\n Soft robots offer a myriad of potential because of their intrinsically compliant bodies, enabling safe interactions with humans and adaptability to unpredictable environments. However, most of them have limited actuation speeds, require complex control systems, and lack sensing capabilities. To address these challenges, herein, a class of metacaps is geometrically designed by introducing an array of ribs to a spherical cap with programmable bistabilities and snapping behaviors, enabling several unprecedented soft robotic functionalities. Specifically, a centimeter-sized, sensor-less metacap gripper is demonstrated that can grasp objects in 3.75 ms upon physical contact or pneumatic actuation with tunable behaviors that have little dependence on the rate of input. The grippers can be readily integrated into a robotic platform for practical applications. The metacap can further enable propelling of a swimming robot, exhibiting amplified swimming speed as well as untethered, electronics-free swimming with tunable speeds using an oscillating valve. The metacap designs provide new strategies to enable the next-generation soft robots to achieve high transient output energy and autonomous and electronics-free maneuvering.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2022\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Temporal Logic Imitation: Learning Plan-Satisficing Motion Policies from Demonstrations.\n \n \n \n \n\n\n \n Wang, Y.; Figueroa, N.; Li, S.; Shah, A.; and Shah, J.\n\n\n \n\n\n\n In 6th Annual Conference on Robot Learning, 2022. \n \n\n\n\n
\n\n\n\n \n \n \"TemporalPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 17 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{wang2022temporal,\ntitle={Temporal Logic Imitation: Learning Plan-Satisficing Motion Policies from Demonstrations},\nauthor={Yanwei Wang and Nadia Figueroa and Shen Li and Ankit Shah and Julie Shah},\nbooktitle={6th Annual Conference on Robot Learning},\nyear={2022},\nurl={https://openreview.net/forum?id=ndYsaoyzCWv}\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Implicit Distance Functions: Learning and Applications in Control.\n \n \n \n \n\n\n \n Koptev, M.; Figueroa, N.; and Billard, A.\n\n\n \n\n\n\n In Proceedings of Workshop on Motion Planning with Implicit Neural Representations of Geometry. IEEE/RSJ International Conference on Intelligent Robots and Systems, 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Implicit paper\n  \n \n \n \"Implicit link\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 12 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@MISC{Koptev:Implicit:2022,\nauthor       = {Koptev, M. and Figueroa, N. and Billard, A.},\ntitle        = {Implicit Distance Functions: Learning and Applications in Control},\nhowpublished = {In Proceedings of Workshop on Motion Planning with Implicit Neural Representations of Geometry. IEEE/RSJ International Conference on Intelligent Robots and Systems},\nyear         = {2022},\nabstract     = {This paper describes a novel approach to learn an implicit, differentiable distance function for arbitrary configurations of a robotic manipulator used for reactive control. By exploiting GPU processing, we efficiently query the learned collision representation and obtain an implicit distance between the robot and the environment. The differentiable nature of the learned function allows for calculating valid gradients wrt. any robot configuration, providing a repulsive vector field in joint space that can be injected in various control methods to improve collision avoidance. We present preliminary results on solving collision avoidance for a 7DoF robot with a reactive inverse kinematics solution, as well as improving performance of a sampling-based model-predictive controller.},\nurl_Paper={https://infoscience.epfl.ch/record/294291},\nurl_Link={https://neural-implicit-workshop.stanford.edu/},\n}\n\n\n
\n
\n\n\n
\n This paper describes a novel approach to learn an implicit, differentiable distance function for arbitrary configurations of a robotic manipulator used for reactive control. By exploiting GPU processing, we efficiently query the learned collision representation and obtain an implicit distance between the robot and the environment. The differentiable nature of the learned function allows for calculating valid gradients wrt. any robot configuration, providing a repulsive vector field in joint space that can be injected in various control methods to improve collision avoidance. We present preliminary results on solving collision avoidance for a 7DoF robot with a reactive inverse kinematics solution, as well as improving performance of a sampling-based model-predictive controller.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Locally active globally stable dynamical systems: Theory, learning, and experiments.\n \n \n \n \n\n\n \n Figueroa, N.; and Billard, A.\n\n\n \n\n\n\n The International Journal of Robotics Research, 41(3): 312-347. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Locally link\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 14 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{Figueroa:IJRR:2022,\n  author={N. {Figueroa} and A. {Billard}},\n  title ={Locally active globally stable dynamical systems: Theory, learning, and experiments},\n  journal = {The International Journal of Robotics Research},\n  volume = {41},\n  number = {3},\n  pages = {312-347},\n  year = {2022},\n  doi={10.1177/02783649211030952},\n  url_Link={https://doi.org/10.1177/02783649211030952}, \n  abstract={State-dependent dynamical systems (DSs) offer adaptivity, reactivity, and robustness to perturbations in motion planning and physical human–robot interaction tasks. Learning DS-based motion plans from non-linear reference trajectories is an active research area in robotics. Most approaches focus on learning DSs that can (i) accurately mimic the demonstrated motion, while (ii) ensuring convergence to the target, i.e., they are globally asymptotically (or exponentially) stable. When subject to perturbations, a compliant robot guided with a DS will continue following the next integral curves of the DS towards the target. If the task requires the robot to track a specific reference trajectory, this approach will fail. To alleviate this shortcoming, we propose the locally active globally stable DS (LAGS-DS), a novel DS formulation that provides both global convergence and stiffness-like symmetric attraction behaviors around a reference trajectory in regions of the state space where trajectory tracking is important. This allows for a unified approach towards motion and impedance encoding in a single DS-based motion model, i.e., stiffness is embedded in the DS. To learn LAGS-DS from demonstrations we propose a learning strategy based on Bayesian non-parametric Gaussian mixture models, Gaussian processes, and a sequence of constrained optimization problems that ensure estimation of stable DS parameters via Lyapunov theory. 
We experimentally validated LAGS-DS on writing tasks with a KUKA LWR 4+ arm and on navigation and co-manipulation tasks with iCub humanoid robots.}}\n\n
\n
\n\n\n
\n State-dependent dynamical systems (DSs) offer adaptivity, reactivity, and robustness to perturbations in motion planning and physical human–robot interaction tasks. Learning DS-based motion plans from non-linear reference trajectories is an active research area in robotics. Most approaches focus on learning DSs that can (i) accurately mimic the demonstrated motion, while (ii) ensuring convergence to the target, i.e., they are globally asymptotically (or exponentially) stable. When subject to perturbations, a compliant robot guided with a DS will continue following the next integral curves of the DS towards the target. If the task requires the robot to track a specific reference trajectory, this approach will fail. To alleviate this shortcoming, we propose the locally active globally stable DS (LAGS-DS), a novel DS formulation that provides both global convergence and stiffness-like symmetric attraction behaviors around a reference trajectory in regions of the state space where trajectory tracking is important. This allows for a unified approach towards motion and impedance encoding in a single DS-based motion model, i.e., stiffness is embedded in the DS. To learn LAGS-DS from demonstrations we propose a learning strategy based on Bayesian non-parametric Gaussian mixture models, Gaussian processes, and a sequence of constrained optimization problems that ensure estimation of stable DS parameters via Lyapunov theory. We experimentally validated LAGS-DS on writing tasks with a KUKA LWR 4+ arm and on navigation and co-manipulation tasks with iCub humanoid robots.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Learning for Adaptive and Reactive Robot Control: A Dynamical Systems Approach.\n \n \n \n \n\n\n \n Billard, A.; Mirrazavi Salehian, S. S.; and Figueroa, N.\n\n\n \n\n\n\n of Intelligent Robotics and Autonomous Agents Series,MIT Press, Cambridge, USA, 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Learning link\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Book{DSbook, \n  author={Billard, A. and Mirrazavi Salehian, Seyed Sina and Figueroa, N.},\n  title={Learning for Adaptive and Reactive Robot Control: A Dynamical Systems Approach},\n  url_Link= {https://mitpress.mit.edu/books/learning-adaptive-and-reactive-robot-control}, \n  publisher = {MIT Press},\n  series={Intelligent Robotics and Autonomous Agents Series,},\n  address = {Cambridge, USA},\n  year = {2022}, \n  abstract = {Summary: Methods by which robots can learn control laws that enable real-time reactivity using dynamical systems; with applications and exercises.\n  This book presents a wealth of machine learning techniques to make the control of robots more flexible and safe when interacting with humans. It introduces a set of control laws that enable reactivity using dynamical systems, a widely used method for solving motion-planning problems in robotics. These control approaches can replan in milliseconds to adapt to new environmental constraints and offer safe and compliant control of forces in contact. The techniques offer theoretical advantages, including convergence to a goal, non-penetration of obstacles, and passivity. The coverage of learning begins with low-level control parameters and progresses to higher-level competencies composed of combinations of skills.\n  Learning for Adaptive and Reactive Robot Control can be used in graduate-level courses in robotics, and, the chapters proceed from fundamentals to more advanced content. The first section presents an overview of the techniques introduced, including learning from demonstration, optimization, and reinforcement learning. Subsequent sections present the core techniques for learning control laws with dynamical systems, trajectory planning with dynamical systems, and methods for compliant and force control using dynamical systems. 
Each chapter describes applications, which range from arm manipulators to whole-body control of humanoid robots, and offers both pencil-and-paper and programming exercises. Lecture videos, slides, and MATLAB code examples are available on the author's website. An instructors-only website offers additional material.}\n}\n\n
\n
\n\n\n
\n Summary: Methods by which robots can learn control laws that enable real-time reactivity using dynamical systems; with applications and exercises. This book presents a wealth of machine learning techniques to make the control of robots more flexible and safe when interacting with humans. It introduces a set of control laws that enable reactivity using dynamical systems, a widely used method for solving motion-planning problems in robotics. These control approaches can replan in milliseconds to adapt to new environmental constraints and offer safe and compliant control of forces in contact. The techniques offer theoretical advantages, including convergence to a goal, non-penetration of obstacles, and passivity. The coverage of learning begins with low-level control parameters and progresses to higher-level competencies composed of combinations of skills. Learning for Adaptive and Reactive Robot Control can be used in graduate-level courses in robotics, and, the chapters proceed from fundamentals to more advanced content. The first section presents an overview of the techniques introduced, including learning from demonstration, optimization, and reinforcement learning. Subsequent sections present the core techniques for learning control laws with dynamical systems, trajectory planning with dynamical systems, and methods for compliant and force control using dynamical systems. Each chapter describes applications, which range from arm manipulators to whole-body control of humanoid robots, and offers both pencil-and-paper and programming exercises. Lecture videos, slides, and MATLAB code examples are available on the author's website. An instructors-only website offers additional material.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n RoCUS: Robot Controller Understanding via Sampling.\n \n \n \n \n\n\n \n Zhou, Y.; Booth, S.; Figueroa, N.; and Shah, J.\n\n\n \n\n\n\n In Faust, A.; Hsu, D.; and Neumann, G., editor(s), Proceedings of the 5th Conference on Robot Learning, volume 164, of Proceedings of Machine Learning Research, pages 850–860, 08–11 Nov 2022. PMLR\n \n\n\n\n
\n\n\n\n \n \n \"RoCUS:Paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{pmlr-v164-zhou22a,\n  title =    {RoCUS: Robot Controller Understanding via Sampling},\n  author =       {Zhou, Yilun and Booth, Serena and Figueroa, Nadia and Shah, Julie},\n  booktitle =    {Proceedings of the 5th Conference on Robot Learning},\n  pages =    {850--860},\n  year =   {2022},\n  editor =   {Faust, Aleksandra and Hsu, David and Neumann, Gerhard},\n  volume =   {164},\n  series =   {Proceedings of Machine Learning Research},\n  month =    {08--11 Nov},\n  publisher =    {PMLR},\n  pdf =    {https://proceedings.mlr.press/v164/zhou22a/zhou22a.pdf},\n  url =    {https://proceedings.mlr.press/v164/zhou22a.html},\n  abstract =   {As robots are deployed in complex situations, engineers and end users must develop a holistic understanding of their behaviors, capabilities, and limitations. Some behaviors are directly optimized by the objective function. They often include success rate, completion time or energy consumption. Other behaviors – e.g., collision avoidance, trajectory smoothness or motion legibility – are typically emergent but equally important for safe and trustworthy deployment. Designing an objective which optimizes every aspect of robot behavior is hard. In this paper, we advocate for systematic analysis of a wide array of behaviors for holistic understanding of robot controllers and, to this end, propose a framework, RoCUS, which uses Bayesian posterior sampling to find situations where the robot controller exhibits user-specified behaviors, such as highly jerky motions. We use RoCUS to analyze three controller classes (deep learning models, rapidly exploring random trees and dynamical system formulations) on two domains (2D navigation and a 7 degree-of-freedom arm reaching), and uncover insights to further our understanding of these controllers and ultimately improve their designs. }\n}\n\n\n%% 2021 %%\n
\n
\n\n\n
\n As robots are deployed in complex situations, engineers and end users must develop a holistic understanding of their behaviors, capabilities, and limitations. Some behaviors are directly optimized by the objective function. They often include success rate, completion time or energy consumption. Other behaviors – e.g., collision avoidance, trajectory smoothness or motion legibility – are typically emergent but equally important for safe and trustworthy deployment. Designing an objective which optimizes every aspect of robot behavior is hard. In this paper, we advocate for systematic analysis of a wide array of behaviors for holistic understanding of robot controllers and, to this end, propose a framework, RoCUS, which uses Bayesian posterior sampling to find situations where the robot controller exhibits user-specified behaviors, such as highly jerky motions. We use RoCUS to analyze three controller classes (deep learning models, rapidly exploring random trees and dynamical system formulations) on two domains (2D navigation and a 7 degree-of-freedom arm reaching), and uncover insights to further our understanding of these controllers and ultimately improve their designs. \n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2021\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Provably Safe and Efficient Motion Planning with Uncertain Human Dynamics.\n \n \n \n \n\n\n \n Li, S.; Figueroa, N.; Shah, A.; and Shah, J. A.\n\n\n \n\n\n\n In Proceedings of Robotics: Science and Systems, Virtual, July 2021. \n \n\n\n\n
\n\n\n\n \n \n \"ProvablyPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 29 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{Li-RSS-21, \n    author    = {Li, Shen and Figueroa, Nadia and Shah, Ankit and Shah, Julie A. },  \n    title     = {{Provably Safe and Efficient Motion Planning with Uncertain Human Dynamics}}, \n    booktitle = {Proceedings of Robotics: Science and Systems}, \n    year      = {2021}, \n    address   = {Virtual}, \n    month     = {July}, \n    doi       = {10.15607/RSS.2021.XVII.050},\n    abstract  = {Ensuring human safety without unnecessarily impacting task efficiency during human-robot interactive manipulation tasks is a critical challenge. In this work; we formally define human physical safety as collision avoidance or safe impact in the event of a collision. We developed a motion planner that theoretically guarantees safety; with a high probability; under the uncertainty in human dynamic models. Our two-pronged definition of safety is able to unlock the planner's potential in finding efficient plans even when collision avoidance is nearly impossible. The improved efficiency is empirically demonstrated in both a simulated goal-reaching domain and a real-world robot-assisted dressing domain. We provide a unified view of two approaches to safe human-robot interaction: human-aware motion planners that use predictive human models and reactive controllers that compliantly handle collisions.},\n    url = {https://safe-dressing.github.io}, \n    link = {}\n} \n\n
\n
\n\n\n
\n Ensuring human safety without unnecessarily impacting task efficiency during human-robot interactive manipulation tasks is a critical challenge. In this work, we formally define human physical safety as collision avoidance or safe impact in the event of a collision. We developed a motion planner that theoretically guarantees safety, with a high probability, under the uncertainty in human dynamic models. Our two-pronged definition of safety is able to unlock the planner's potential in finding efficient plans even when collision avoidance is nearly impossible. The improved efficiency is empirically demonstrated in both a simulated goal-reaching domain and a real-world robot-assisted dressing domain. We provide a unified view of two approaches to safe human-robot interaction: human-aware motion planners that use predictive human models and reactive controllers that compliantly handle collisions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Real-Time Self-Collision Avoidance in Joint Space for Humanoid Robots.\n \n \n \n \n\n\n \n Koptev, M.; Figueroa, N.; and Billard, A.\n\n\n \n\n\n\n IEEE Robotics and Automation Letters, 6(2): 1240-1247. 2021.\n [Selected for presentation in ICRA 2021]\n\n\n\n
\n\n\n\n \n \n \"Real-TimePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 10 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{Koptev:RAL:2021,\n  author={M. {Koptev} and N. {Figueroa} and A. {Billard}},\n  journal={IEEE Robotics and Automation Letters}, \n  title={Real-Time Self-Collision Avoidance in Joint Space for Humanoid Robots}, \n  note={[Selected for presentation in ICRA 2021]},\n  year={2021},\n  volume={6},\n  number={2},\n  pages={1240-1247},\n  doi={10.1109/LRA.2021.3057024},\n  url={https://ieeexplore.ieee.org/document/9345975}, \n  abstract={In this letter, we propose a real-time self-collision avoidance approach for whole-body humanoid robot control. To achieve this, we learn the feasible regions of control in the humanoid's joint space as smooth self-collision boundary functions. Collision-free motions are generated online by treating the learned boundary functions as constraints in a Quadratic Program based Inverse Kinematic solver. As the geometrical complexity of a humanoid robot joint space grows with the number of degrees-of-freedom (DoF), learning computationally efficient and accurate boundary functions is challenging. We address this by partitioning the robot model into multiple lower-dimensional submodels. We compare performance of several state-of-the-art machine learning techniques to learn such boundary functions. Our approach is validated on the 29-DoF iCub humanoid robot, demonstrating highly accurate real-time self-collision avoidance.}}\n\n\n%% 2020 %%\n
\n
\n\n\n
\n In this letter, we propose a real-time self-collision avoidance approach for whole-body humanoid robot control. To achieve this, we learn the feasible regions of control in the humanoid's joint space as smooth self-collision boundary functions. Collision-free motions are generated online by treating the learned boundary functions as constraints in a Quadratic Program based Inverse Kinematic solver. As the geometrical complexity of a humanoid robot joint space grows with the number of degrees-of-freedom (DoF), learning computationally efficient and accurate boundary functions is challenging. We address this by partitioning the robot model into multiple lower-dimensional submodels. We compare performance of several state-of-the-art machine learning techniques to learn such boundary functions. Our approach is validated on the 29-DoF iCub humanoid robot, demonstrating highly accurate real-time self-collision avoidance.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2020\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n A Dynamical System Approach for Adaptive Grasping, Navigation and Co-Manipulation with Humanoid Robots.\n \n \n \n \n\n\n \n Figueroa, N.; Faraji, S.; Koptev, M.; and Billard, A.\n\n\n \n\n\n\n In IEEE International Conference on Robotics and Automation (ICRA), pages 7676-7682, 2020. \n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 21 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{9197038,\n  author={N. {Figueroa} and S. {Faraji} and M. {Koptev} and A. {Billard}},\n  booktitle={IEEE International Conference on Robotics and Automation (ICRA)}, \n  title={A Dynamical System Approach for Adaptive Grasping, Navigation and Co-Manipulation with Humanoid Robots}, \n  year={2020},\n  volume={},\n  number={},\n  pages={7676-7682},\n  doi={10.1109/ICRA40945.2020.9197038},\n  url={https://ieeexplore.ieee.org/document/9197038},\n  abstract={In this paper, we present an integrated approach that provides compliant control of an iCub humanoid robot and adaptive reaching, grasping, navigating and co-manipulating capabilities. We use state-dependent dynamical systems (DS) to (i) coordinate and drive the robots hands (in both position and orientation) to grasp an object using an intermediate virtual object, and (ii) drive the robot's base while walking/navigating. The use of DS as motion generators allows us to adapt smoothly as the object moves and to re-plan on-line motion of the arms and body to reach the object's new location. The desired motion generated by the DS are used in combination with a whole-body compliant control strategy that absorbs perturbations while walking and offers compliant behaviors for grasping and manipulation tasks. Further, the desired dynamics for the arm and body can be learned from demonstrations. By integrating these components, we achieve unprecedented adaptive behaviors for whole body manipulation. We showcase this in simulations and real-world experiments where iCub robots (i) walk-to-grasp objects, (ii) follow a human (or another iCub) through interaction and (iii) learn to navigate or comanipulate an object from human guided demonstrations; whilst being robust to changing targets and perturbations}\n}\n\n\n\n
\n
\n\n\n
\n In this paper, we present an integrated approach that provides compliant control of an iCub humanoid robot and adaptive reaching, grasping, navigating and co-manipulating capabilities. We use state-dependent dynamical systems (DS) to (i) coordinate and drive the robot's hands (in both position and orientation) to grasp an object using an intermediate virtual object, and (ii) drive the robot's base while walking/navigating. The use of DS as motion generators allows us to adapt smoothly as the object moves and to re-plan on-line motion of the arms and body to reach the object's new location. The desired motion generated by the DS are used in combination with a whole-body compliant control strategy that absorbs perturbations while walking and offers compliant behaviors for grasping and manipulation tasks. Further, the desired dynamics for the arm and body can be learned from demonstrations. By integrating these components, we achieve unprecedented adaptive behaviors for whole body manipulation. We showcase this in simulations and real-world experiments where iCub robots (i) walk-to-grasp objects, (ii) follow a human (or another iCub) through interaction and (iii) learn to navigate or comanipulate an object from human guided demonstrations; whilst being robust to changing targets and perturbations.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2019\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Geometric Invariance of Covariance Matrices for Unsupervised Clustering, Segmentation and Action Discovery in Robotic Applications.\n \n \n \n\n\n \n Figueroa, N.; and Billard, A.\n\n\n \n\n\n\n In preparation for re-submission to the Journal of Machine Learning Research (JMLR), 2019.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@MISC{Figueroa:JMLR:2019,\n  author       = {Figueroa, N. and Billard, A.},\n  title        = {Geometric Invariance of Covariance Matrices for Unsupervised Clustering, Segmentation and Action Discovery in Robotic Applications},\n  howpublished = {In preparation for re-submission to the Journal of Machine Learning Research (JMLR)},\n  year         = {2019},\n  abstract = {In this paper, we introduce a novel distance in the space of covariance matrices that is invariant to geometric non-deforming transformations. We refer to it as the Spectral Polytope Covariance Matrix (SPCM) distance. We prove that it is a semi-metric capable of measuring the similarity between shapes of Gaussian distributions, covariance matrices and ellipsoids; all of which are symmetric positive definite (SPD) matrices. Such a distance is desirable in data-driven robotics applications where data represented by SPD matrices must be grouped or clustered, yet is collected in different unknown frames of reference or with different timing and scale. We thus propose a geometric invariant clustering approach which leverages the SPCM distance with vector space embeddings for SPD matrices and distance-dependent Bayesian non-parametric mixture model. We show that our proposed geometric invariant clustering algorithm outperforms state-of-the-art SPD matrix clustering schemes on a variety of robotics datasets. Further, we offer an algorithmic coupling between our clustering scheme and a Bayesian non-parametric Hidden Markov Model. This coupling allows for automatic segmentation and similar action discovery in sequential tasks demonstrated from unconstrained human motions encompassing wiping, polishing, dough rolling and peeling.}\n}\n\n\n
\n
\n\n\n
\n In this paper, we introduce a novel distance in the space of covariance matrices that is invariant to geometric non-deforming transformations. We refer to it as the Spectral Polytope Covariance Matrix (SPCM) distance. We prove that it is a semi-metric capable of measuring the similarity between shapes of Gaussian distributions, covariance matrices and ellipsoids; all of which are symmetric positive definite (SPD) matrices. Such a distance is desirable in data-driven robotics applications where data represented by SPD matrices must be grouped or clustered, yet is collected in different unknown frames of reference or with different timing and scale. We thus propose a geometric invariant clustering approach which leverages the SPCM distance with vector space embeddings for SPD matrices and distance-dependent Bayesian non-parametric mixture model. We show that our proposed geometric invariant clustering algorithm outperforms state-of-the-art SPD matrix clustering schemes on a variety of robotics datasets. Further, we offer an algorithmic coupling between our clustering scheme and a Bayesian non-parametric Hidden Markov Model. This coupling allows for automatic segmentation and similar action discovery in sequential tasks demonstrated from unconstrained human motions encompassing wiping, polishing, dough rolling and peeling.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n From High-Level to Low-Level Robot Learning of Complex Tasks: Leveraging Priors, Metrics and Dynamical Systems.\n \n \n \n \n\n\n \n Figueroa, N.\n\n\n \n\n\n\n Ph.D. Thesis, EPFL, Lausanne, Switzerland, 2019.\n \n\n\n\n
\n\n\n\n \n \n \"FromPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@phdthesis{Figueroa:PhD:2019,\n  author = {Figueroa, Nadia},\n  advisor = {Billard, Aude},\n  title = {From High-Level to Low-Level Robot Learning of Complex Tasks: Leveraging Priors, Metrics and Dynamical Systems},\n  year = {2019},\n  publisher = {EPFL},\n  address = {Lausanne, Switzerland},\n  url = {http://infoscience.epfl.ch/record/270640},\n  doi = {10.5075/epfl-thesis-9631},\n  school = {EPFL},\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2018\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Learning Augmented Joint-Space Task-Oriented Dynamical Systems: A Linear Parameter Varying and Synergetic Control Approach.\n \n \n \n \n\n\n \n Shavit, Y.; Figueroa, N.; Mirrazavi Salehian, S. S.; and Billard, A.\n\n\n \n\n\n\n IEEE Robotics and Automation Letters, 3(3): 2718-2725. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"LearningPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 6 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{8355581,\n  author={Y. {Shavit} and N. {Figueroa} and Mirrazavi Salehian, Seyed Sina and A. {Billard}},\n  journal={IEEE Robotics and Automation Letters}, \n  title={Learning Augmented Joint-Space Task-Oriented Dynamical Systems: A Linear Parameter Varying and Synergetic Control Approach}, \n  year={2018},\n  volume={3},\n  number={3},\n  pages={2718-2725},\n  doi={10.1109/LRA.2018.2833497},\n  url={https://ieeexplore.ieee.org/document/8355581},\n  abstract={In this letter, we propose an asymptotically stable joint-space dynamical system (DS) that captures desired behaviors in joint-space while converging toward a task-space attractor in both position and orientation. To encode joint-space behaviors while meeting the stability criteria, we propose a DS constructed as a linear parameter varying system combining different behavior synergies and provide a method for learning these synergy matrices from demonstrations. Specifically, we use dimensionality reduction to find a low-dimensional embedding space for modulating joint synergies, and then estimate the parameters of the corresponding synergies by solving a convex semidefinite optimization problem that minimizes the joint velocity prediction error from the demonstrations. Our proposed approach is empirically validated on a variety of motions that reach a target in position and orientation, while following a desired joint-space behavior.}\n}\n\n
\n
\n\n\n
\n In this letter, we propose an asymptotically stable joint-space dynamical system (DS) that captures desired behaviors in joint-space while converging toward a task-space attractor in both position and orientation. To encode joint-space behaviors while meeting the stability criteria, we propose a DS constructed as a linear parameter varying system combining different behavior synergies and provide a method for learning these synergy matrices from demonstrations. Specifically, we use dimensionality reduction to find a low-dimensional embedding space for modulating joint synergies, and then estimate the parameters of the corresponding synergies by solving a convex semidefinite optimization problem that minimizes the joint velocity prediction error from the demonstrations. Our proposed approach is empirically validated on a variety of motions that reach a target in position and orientation, while following a desired joint-space behavior.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Unified Framework for Coordinated Multi-Arm Motion Planning.\n \n \n \n \n\n\n \n Mirrazavi Salehian, S. S.; Figueroa, N.; and Billard, A.\n\n\n \n\n\n\n The International Journal of Robotics Research, 37(10): 1205-1232. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{doi:10.1177/0278364918765952,\nauthor = {Mirrazavi Salehian, Seyed Sina and Figueroa, Nadia and Billard, Aude},\ntitle ={A Unified Framework for Coordinated Multi-Arm Motion Planning},\njournal = {The International Journal of Robotics Research},\nvolume = {37},\nnumber = {10},\npages = {1205-1232},\nyear = {2018},\ndoi = {10.1177/0278364918765952},\nURL = { \n        https://doi.org/10.1177/0278364918765952\n    \n},\neprint = { \n        https://doi.org/10.1177/0278364918765952\n    \n}\n,\n    abstract = { Coordination is essential in the design of dynamic control strategies for multi-arm robotic systems. Given the complexity of the task and dexterity of the system, coordination constraints can emerge from different levels of planning and control. Primarily, one must consider task-space coordination, where the robots must coordinate with each other, with an object or with a target of interest. Coordination is also necessary in joint space, as the robots should avoid self-collisions at any time. We provide such joint-space coordination by introducing a centralized inverse kinematics (IK) solver under self-collision avoidance constraints, formulated as a quadratic program and solved in real-time. The space of free motion is modeled through a sparse non-linear kernel classification method in a data-driven learning approach. Moreover, we provide multi-arm task-space coordination for both synchronous or asynchronous behaviors. We define a synchronous behavior as that in which the robot arms must coordinate with each other and with a moving object such that they reach for it in synchrony. In contrast, an asynchronous behavior allows for each robot to perform independent point-to-point reaching motions. To transition smoothly from asynchronous to synchronous behaviors and vice versa, we introduce the notion of synchronization allocation. 
We show how this allocation can be controlled through an external variable, such as the location of the object to be manipulated. Both behaviors and their synchronization allocation are encoded in a single dynamical system. We validate our framework on a dual-arm robotic system and demonstrate that the robots can re-synchronize and adapt the motion of each arm while avoiding self-collision within milliseconds. The speed of control is exploited to intercept fast moving objects whose motion cannot be predicted accurately. }\n}\n\n
\n
\n\n\n
\n Coordination is essential in the design of dynamic control strategies for multi-arm robotic systems. Given the complexity of the task and dexterity of the system, coordination constraints can emerge from different levels of planning and control. Primarily, one must consider task-space coordination, where the robots must coordinate with each other, with an object or with a target of interest. Coordination is also necessary in joint space, as the robots should avoid self-collisions at any time. We provide such joint-space coordination by introducing a centralized inverse kinematics (IK) solver under self-collision avoidance constraints, formulated as a quadratic program and solved in real-time. The space of free motion is modeled through a sparse non-linear kernel classification method in a data-driven learning approach. Moreover, we provide multi-arm task-space coordination for both synchronous or asynchronous behaviors. We define a synchronous behavior as that in which the robot arms must coordinate with each other and with a moving object such that they reach for it in synchrony. In contrast, an asynchronous behavior allows for each robot to perform independent point-to-point reaching motions. To transition smoothly from asynchronous to synchronous behaviors and vice versa, we introduce the notion of synchronization allocation. We show how this allocation can be controlled through an external variable, such as the location of the object to be manipulated. Both behaviors and their synchronization allocation are encoded in a single dynamical system. We validate our framework on a dual-arm robotic system and demonstrate that the robots can re-synchronize and adapt the motion of each arm while avoiding self-collision within milliseconds. The speed of control is exploited to intercept fast moving objects whose motion cannot be predicted accurately. \n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Physically-Consistent Bayesian Non-Parametric Mixture Model for Dynamical System Learning.\n \n \n \n \n\n\n \n Figueroa, N.; and Billard, A.\n\n\n \n\n\n\n In Billard, A.; Dragan, A.; Peters, J.; and Morimoto, J., editor(s), volume 87, of Proceedings of Machine Learning Research, pages 927–946, 29–31 Oct 2018. PMLR\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Figueroa:CORL:2018, title = {A Physically-Consistent Bayesian Non-Parametric Mixture Model for Dynamical System Learning}, author = {Figueroa, Nadia and Billard, Aude}, pages = {927--946}, year = {2018}, editor = {Aude Billard and Anca Dragan and Jan Peters and Jun Morimoto}, volume = {87}, series = {Proceedings of Machine Learning Research}, address = {}, month = {29--31 Oct}, publisher = {PMLR}, pdf = {http://proceedings.mlr.press/v87/figueroa18a/figueroa18a.pdf}, url = {http://proceedings.mlr.press/v87/figueroa18a.html}, abstract = {We propose a physically-consistent Bayesian non-parametric approach for fitting Gaussian Mixture Models (GMM) to trajectory data. Physical-consistency of the GMM is ensured by imposing a prior on the component assignments biased by a novel similarity metric that leverages locality and directionality. The resulting GMM is then used to learn globally asymptotically stable Dynamical Systems (DS) via a Linear Parameter Varying (LPV) re-formulation. The proposed DS learning scheme accurately encodes challenging nonlinear motions automatically. Finally, a data-efficient incremental learning framework is introduced that encodes a DS from batches of trajectories, while preserving global stability. Our contributions are validated on 2D datasets and a variety of tasks that involve single-target complex motions with a KUKA LWR 4+ robot arm. } }\n\n
\n
\n\n\n
\n We propose a physically-consistent Bayesian non-parametric approach for fitting Gaussian Mixture Models (GMM) to trajectory data. Physical-consistency of the GMM is ensured by imposing a prior on the component assignments biased by a novel similarity metric that leverages locality and directionality. The resulting GMM is then used to learn globally asymptotically stable Dynamical Systems (DS) via a Linear Parameter Varying (LPV) re-formulation. The proposed DS learning scheme accurately encodes challenging nonlinear motions automatically. Finally, a data-efficient incremental learning framework is introduced that encodes a DS from batches of trajectories, while preserving global stability. Our contributions are validated on 2D datasets and a variety of tasks that involve single-target complex motions with a KUKA LWR 4+ robot arm. \n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Modeling Compositions of Impedance-based Primitives via Dynamical Systems.\n \n \n \n \n\n\n \n Figueroa, N.; and Billard, A.\n\n\n \n\n\n\n Proceedings of the ICRA 2018 Workshop on Cognitive Whole-Body Control for Compliant Robot Manipulation (COWB-COMP),6. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"ModelingPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{FigueroaFernandez:255463,\n      title = {Modeling Compositions of Impedance-based Primitives via  Dynamical Systems.},\n      author = {Figueroa, Nadia and Billard, Aude},\n      journal = {Proceedings of the ICRA 2018 Workshop on Cognitive Whole-Body  Control for Compliant Robot Manipulation (COWB-COMP)},\n      pages = {6},\n      year = {2018},\n      abstract = {In this work, we introduce a novel Dynamical System  (DS)-based approach for modeling complex and compliant  manipulation tasks, that are composed of a sequence of  action phases with different compliance requirements; i.e.  impedance primitives. We adopt a closed-loop (DS)-based  control architecture and present the Locally Active  Globally Stable (LAGS)-DS formulation. In LAGS-DS we seek  to model the whole task as a globally asymptotically stable  DS that has locally task-varying dynamics and smoothly  transit between them. These locally task-varying dynamics  represent the set of impedance primitives, hence, rather  than modeling the task as a discretization of impedance  primitives, we model it as a composition of impedance  primitives in a single DS-based controller. In this paper,  we present the theoretical background for this novel DS,  briefly describe the learning approach and provide 2D  simulations of LAGS-DS learned from toy data.},\n      url = {http://infoscience.epfl.ch/record/255463},\n}\n\n
\n
\n\n\n
\n In this work, we introduce a novel Dynamical System (DS)-based approach for modeling complex and compliant manipulation tasks, that are composed of a sequence of action phases with different compliance requirements; i.e. impedance primitives. We adopt a closed-loop (DS)-based control architecture and present the Locally Active Globally Stable (LAGS)-DS formulation. In LAGS-DS we seek to model the whole task as a globally asymptotically stable DS that has locally task-varying dynamics and smoothly transit between them. These locally task-varying dynamics represent the set of impedance primitives, hence, rather than modeling the task as a discretization of impedance primitives, we model it as a composition of impedance primitives in a single DS-based controller. In this paper, we present the theoretical background for this novel DS, briefly describe the learning approach and provide 2D simulations of LAGS-DS learned from toy data.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multi-Arm Self-Collision Avoidance: A Sparse Solution for a Big Data Problem.\n \n \n \n \n\n\n \n Figueroa, N.; Mirrazavi Salehian, S. S.; and Billard, A.\n\n\n \n\n\n\n Proceedings of the ICRA 2018 Third Machine Learning in Planning and Control of Robot Motion (MLPC) Workshop.,6. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"Multi-ArmPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{FigueroaFernandez:255462,\n      title = {Multi-Arm Self-Collision Avoidance: A Sparse Solution for  a Big Data Problem.},\n      author = {Figueroa, Nadia and Mirrazavi Salehian, Seyed Sina and Billard, Aude},\n      journal = {Proceedings of the ICRA 2018 Third Machine Learning in Planning and  Control of Robot Motion (MLPC) Workshop.},\n      pages = {6},\n      year = {2018},\n      abstract = {In this work, we propose a data-driven approach for  real-time self-collision avoidance in multi-arm systems.  The approach consists of modeling the regions in  joint-space that lead to collisions via a Self-Collision  Avoidance (SCA) boundary and use it as a constraint for a  centralized Inverse Kinematics (IK) solver. This problem is  particularly challenging as the dimensionality of the  joint-configurations is in the order of millions (for a  dual-arm system), while the IK solver must run within a  control loop of 2ms. Hence, an extremely sparse solution is  needed for this big data problem. The SCA region is modeled  through a sparse non-linear kernel classification method  that yields a runtime of less than 2ms (on a single thread  CPU process) and has a False Positive Rate (FPR)=1.5%. Code  for generating multi-arm datasets and learning the sparse  SCA boundary are available at:  https://github.com/nbfigueroa/SCA-Boundary-Learning},\n      url = {http://infoscience.epfl.ch/record/255462},\n}\n\n\n
\n
\n\n\n
\n In this work, we propose a data-driven approach for real-time self-collision avoidance in multi-arm systems. The approach consists of modeling the regions in joint-space that lead to collisions via a Self-Collision Avoidance (SCA) boundary and use it as a constraint for a centralized Inverse Kinematics (IK) solver. This problem is particularly challenging as the dimensionality of the joint-configurations is in the order of millions (for a dual-arm system), while the IK solver must run within a control loop of 2ms. Hence, an extremely sparse solution is needed for this big data problem. The SCA region is modeled through a sparse non-linear kernel classification method that yields a runtime of less than 2ms (on a single thread CPU process) and has a False Positive Rate (FPR)=1.5%. Code for generating multi-arm datasets and learning the sparse SCA boundary are available at: https://github.com/nbfigueroa/SCA-Boundary-Learning\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Transitioning with confidence during contact/non-contact scenarios.\n \n \n \n \n\n\n \n Mirrazavi Salehian, S. S.; Lin, H.; Figueroa, N.; Smith, J.; Mistry, M.; and Billard, A.\n\n\n \n\n\n\n In Proceedings of the IROS 2018 Workshop on Human-Robot Cooperation and Collaboration in Manipulation: Advancements and Challenges.,6. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"TransitioningPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{MirrazaviSalehian:257293,\n      title = {Transitioning with confidence during contact/non-contact scenarios},\n      author = {Mirrazavi Salehian, Seyed Sina and Lin, Hsiu-Chin and Figueroa, Nadia and Smith, Joshua and  Mistry, Michael and Billard, Aude},\n      journal = {In Proceedings of the IROS 2018 Workshop on Human-Robot Cooperation and Collaboration in Manipulation: Advancements and Challenges.},\n      pages = {6},\n      year = {2018},    \n      abstract = {In this work, we propose a dynamical system based strategy  for establishing a stable contact with convex shaped  surfaces during non-contact/contact scenarios. A contact is  called stable if the impact occurs only once and the robot  remains in contact with the surface after the impact.  Realizing a stable contact is particularly challenging as  the contact leaves a very short time-window for the robot  to react properly to the impact force. In this paper, we  propose a strategy consisting of locally modulating the  robot’s motion in a way that it aligns with the surface  before making the contact. We show theoretically and  empirically that by using the modulation framework, the  contact is stable and the robot stays in contact with the  surface after the first impact.},\n      url = {http://infoscience.epfl.ch/record/257293},\n}\n\n\n\n
\n
\n\n\n
\n In this work, we propose a dynamical system based strategy for establishing a stable contact with convex shaped surfaces during non-contact/contact scenarios. A contact is called stable if the impact occurs only once and the robot remains in contact with the surface after the impact. Realizing a stable contact is particularly challenging as the contact leaves a very short time-window for the robot to react properly to the impact force. In this paper, we propose a strategy consisting of locally modulating the robot’s motion in a way that it aligns with the surface before making the contact. We show theoretically and empirically that by using the modulation framework, the contact is stable and the robot stays in contact with the surface after the first impact.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2017\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Dynamical System-Based Motion Planning for Multi-Arm Systems: Reaching for Moving Objects.\n \n \n \n \n\n\n \n Mirrazavi Salehian, S. S.; Figueroa, N.; and Billard, A.\n\n\n \n\n\n\n In Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence, IJCAI-17, pages 4914–4918, 2017. \n \n\n\n\n
\n\n\n\n \n \n \"DynamicalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{ijcai2017-693,\n  author    = {Mirrazavi Salehian, Seyed Sina and Figueroa, Nadia and Billard, Aude},\n  title     = {Dynamical System-Based Motion Planning for Multi-Arm Systems: Reaching for Moving Objects},\n  booktitle = {Proceedings of the Twenty-Sixth International Joint Conference on\n               Artificial Intelligence, {IJCAI-17}},\n  pages     = {4914--4918},\n  year      = {2017},\n  doi       = {10.24963/ijcai.2017/693},\n  url       = {https://doi.org/10.24963/ijcai.2017/693},\n  abstract  = {The use of coordinated multi-arm robotic systems allows to preform manipulations of heavy or bulky objects that would otherwise be infeasible for a single-arm robot. This paper concisely introduces our work on coordinated multi-arm control [Salehian et al., 2016a], where we proposed a virtual object based dynamical systems (DS) control law to generate autonomous and synchronized motions for a multi-arm robot system. We show theoretically and empirically that the multi-arm + virtual object system converges asymptotically to a moving object. The proposed framework is validated on a dual-arm robotic system. We demonstrate that it can re-synchronize and adapt the motion of each arm in a fraction of a second, even when the object’s motion is fast and not accurately predictable.}\n}\n\n
\n
\n\n\n
\n The use of coordinated multi-arm robotic systems allows to preform manipulations of heavy or bulky objects that would otherwise be infeasible for a single-arm robot. This paper concisely introduces our work on coordinated multi-arm control [Salehian et al., 2016a], where we proposed a virtual object based dynamical systems (DS) control law to generate autonomous and synchronized motions for a multi-arm robot system. We show theoretically and empirically that the multi-arm + virtual object system converges asymptotically to a moving object. The proposed framework is validated on a dual-arm robotic system. We demonstrate that it can re-synchronize and adapt the motion of each arm in a fraction of a second, even when the object’s motion is fast and not accurately predictable.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Learning Complex Manipulation Tasks from Heterogeneous and Unstructured Demonstrations.\n \n \n \n\n\n \n Figueroa, N.; and Billard, A.\n\n\n \n\n\n\n In Proceedings of Workshop on Synergies between Learning and Interaction. IEEE/RSJ International Conference on Intelligent Robots and Systems, 2017.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@MISC{Figueroa2017-ID988,\nauthor       = {Figueroa, N. and Billard, A.},\ntitle        = {Learning Complex Manipulation Tasks from Heterogeneous and Unstructured Demonstrations},\nhowpublished = {In Proceedings of Workshop on Synergies between Learning and Interaction. IEEE/RSJ International Conference on Intelligent Robots and Systems},\nyear         = {2017},\nabstract     = {Motivated by the current state-of-the-art in Robot Learning from Demonstration (LfD), in this paper, we tackle two central issues in the learning pipeline: namely, dealing with (1) heterogeneity and (2) unstructuredness in demonstrations of complex manipulation tasks. We build upon our previous work on transform-invariant segmentation and action discovery [1], to learn the underlying action sequence of tasks demonstrated in different reference frames or contexts. We then construct and parametrize a multi-phase task-space control architecture, boot-strapped by the segmented data and model parameters learned from the action discovery approach. Successful case studies of the proposed methodology are presented for uni/bi-manual cooking tasks demonstrated through kinesthetic teaching.}\n}\n\n\n
\n
\n\n\n
\n Motivated by the current state-of-the-art in Robot Learning from Demonstration (LfD), in this paper, we tackle two central issues in the learning pipeline: namely, dealing with (1) heterogeneity and (2) unstructuredness in demonstrations of complex manipulation tasks. We build upon our previous work on transform-invariant segmentation and action discovery [1], to learn the underlying action sequence of tasks demonstrated in different reference frames or contexts. We then construct and parametrize a multi-phase task-space control architecture, boot-strapped by the segmented data and model parameters learned from the action discovery approach. Successful case studies of the proposed methodology are presented for uni/bi-manual cooking tasks demonstrated through kinesthetic teaching.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2016\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Learning complex sequential tasks from demonstration: A pizza dough rolling case study.\n \n \n \n\n\n \n Figueroa, N.; Ureche, A. L. P.; and Billard, A.\n\n\n \n\n\n\n In 2016 11th ACM/IEEE International Conference on Human-Robot Interaction (HRI), pages 611-612, 2016. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{7451881,\n  author={N. {Figueroa} and A. L. P. {Ureche} and A. {Billard}},\n  booktitle={2016 11th ACM/IEEE International Conference on Human-Robot Interaction (HRI)}, \n  title={Learning complex sequential tasks from demonstration: A pizza dough rolling case study}, \n  year={2016},\n  volume={},\n  number={},\n  pages={611-612},\n  doi={10.1109/HRI.2016.7451881}, \n  abstract={This paper introduces a hierarchical framework that is capable of learning complex sequential tasks from human demonstrations through kinesthetic teaching, with minimal human intervention. Via an automatic task segmentation and action primitive discovery algorithm, we are able to learn both the high-level task decomposition (into action primitives), as well as low-level motion parameterizations for each action, in a fully integrated framework. In order to reach the desired task goal, we encode a task metric based on the evolution of the manipulated object during demonstration, and use it to sequence and parametrize each action primitive. We illustrate this framework with a pizza dough rolling task and show how the learned hierarchical knowledge is directly used for autonomous robot execution.}}\n\n\n
\n
\n\n\n
\n This paper introduces a hierarchical framework that is capable of learning complex sequential tasks from human demonstrations through kinesthetic teaching, with minimal human intervention. Via an automatic task segmentation and action primitive discovery algorithm, we are able to learn both the high-level task decomposition (into action primitives), as well as low-level motion parameterizations for each action, in a fully integrated framework. In order to reach the desired task goal, we encode a task metric based on the evolution of the manipulated object during demonstration, and use it to sequence and parametrize each action primitive. We illustrate this framework with a pizza dough rolling task and show how the learned hierarchical knowledge is directly used for autonomous robot execution.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Open robotics research using web-based knowledge services.\n \n \n \n\n\n \n Beetz, M.; Beßler, D.; Winkler, J.; Worch, J.; Bálint-Benczédi, F.; Bartels, G.; Billard, A.; Bozcuoğlu, A. K.; Zhou Fang; Figueroa, N.; Haidu, A.; Langer, H.; Maldonado, A.; Ureche, A. L. P.; Tenorth, M.; and Wiedemeyer, T.\n\n\n \n\n\n\n In 2016 IEEE International Conference on Robotics and Automation (ICRA), pages 5380-5387, May 2016. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@INPROCEEDINGS{7487749,\n  author={M. {Beetz} and D. {Beßler} and J. {Winkler} and J. {Worch} and F. {Bálint-Benczédi} and G. {Bartels} and A. {Billard} and A. K. {Bozcuoğlu} and  {Zhou Fang} and N. {Figueroa} and A. {Haidu} and H. {Langer} and A. {Maldonado} and A. L. P. {Ureche} and M. {Tenorth} and T. {Wiedemeyer}},\n  booktitle={2016 IEEE International Conference on Robotics and Automation (ICRA)}, \n  title={Open robotics research using web-based knowledge services}, \n  year={2016},\n  volume={},\n  number={},\n  pages={5380-5387},\n  abstract={In this paper we discuss how the combination of modern technologies in “big data” storage and management, knowledge representation and processing, cloud-based computation, and web technology can help the robotics community to establish and strengthen an open research discipline. We describe how we made the demonstrator of a EU project review openly available to the research community. Specifically, we recorded episodic memories with rich semantic annotations during a pizza preparation experiment in autonomous robot manipulation. Afterwards, we released them as an open knowledge base using the cloud- and web-based robot knowledge service OPENEASE. 
We discuss several ways on how this open data can be used to validate our experimental reports and to tackle novel challenging research problems.},\n  keywords={cloud computing;control engineering computing;knowledge representation;manipulators;mobile robots;Web services;open robotics research;Web-based knowledge services;Big Data storage;Big Data management;knowledge representation;knowledge processing;cloud-based computation;Web technology;robotics community;open research discipline;EU project;pizza preparation experiment;autonomous robot manipulation;open knowledge base;cloud-based robot knowledge service;Web-based robot knowledge service;OPENEASE;Robot sensing systems;Knowledge based systems;Knowledge engineering;Semantics;Data visualization;Big data},\n  doi={10.1109/ICRA.2016.7487749},\n  ISSN={},\n  month={May},}\n\n\n
\n
\n\n\n
\n In this paper we discuss how the combination of modern technologies in “big data” storage and management, knowledge representation and processing, cloud-based computation, and web technology can help the robotics community to establish and strengthen an open research discipline. We describe how we made the demonstrator of a EU project review openly available to the research community. Specifically, we recorded episodic memories with rich semantic annotations during a pizza preparation experiment in autonomous robot manipulation. Afterwards, we released them as an open knowledge base using the cloud- and web-based robot knowledge service OPENEASE. We discuss several ways on how this open data can be used to validate our experimental reports and to tackle novel challenging research problems.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Coordinated multi-arm motion planning: Reaching for moving objects in the face of uncertainty.\n \n \n \n\n\n \n Mirrazavi Salehian, S. S.; Figueroa, N.; and Billard, A.\n\n\n \n\n\n\n In Proceedings of Robotics: Science and Systems, AnnArbor, Michigan, June 2016. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{Salehian-RSS-16, \n    AUTHOR    = {Mirrazavi Salehian, Seyed Sina and Figueroa, Nadia and Billard, Aude}, \n    TITLE     = {Coordinated multi-arm motion planning: Reaching for moving objects in the face of uncertainty}, \n    BOOKTITLE = {Proceedings of Robotics: Science and Systems}, \n    YEAR      = {2016}, \n    ADDRESS   = {AnnArbor, Michigan}, \n    MONTH     = {June}, \n    DOI       = {10.15607/RSS.2016.XII.019}, \n    abstract  = {Coordinated control strategies for multi-robot systems are necessary for tasks that cannot be executed by a single robot. This encompasses tasks where the workspace of the robot is too small or where the load is too heavy for one robot to handle. Using multiple robots makes the task feasible by extending the workspace and/or increase the payload of the overall robotic system. In this paper, we consider two instances of such task: a co-worker scenario in which a human hands over a large object to a robot; intercepting a large flying object. The problem is made difficult as the pick-up/intercept motions must take place while the object is in motion and because the object's motion is not deterministic. The challenge is then to adapt the motion of the robotic arms in coordination with one another and with the object. Determining the pick-up/intercept point is done by taking into account the workspace of the multi-arm system and is continuously recomputed to adapt to change in the object's trajectory. We propose a dynamical systems (DS) based control law to generate autonomous and synchronized motions for a multi-arm robot system in the task of reaching for a moving object. We show theoretically that the resulting DS coordinates the motion of the robots with each other and with the object, while the system remains stable. 
We validate our approach on a dual-arm robotic system and demonstrate that it can re-synchronize and adapt the motion of each arm in synchrony in a fraction of seconds, even when the motion of the object is fast and not accurately predictable.}\n} \n  \n\n
\n
\n\n\n
\n Coordinated control strategies for multi-robot systems are necessary for tasks that cannot be executed by a single robot. This encompasses tasks where the workspace of the robot is too small or where the load is too heavy for one robot to handle. Using multiple robots makes the task feasible by extending the workspace and/or increase the payload of the overall robotic system. In this paper, we consider two instances of such task: a co-worker scenario in which a human hands over a large object to a robot; intercepting a large flying object. The problem is made difficult as the pick-up/intercept motions must take place while the object is in motion and because the object's motion is not deterministic. The challenge is then to adapt the motion of the robotic arms in coordination with one another and with the object. Determining the pick-up/intercept point is done by taking into account the workspace of the multi-arm system and is continuously recomputed to adapt to change in the object's trajectory. We propose a dynamical systems (DS) based control law to generate autonomous and synchronized motions for a multi-arm robot system in the task of reaching for a moving object. We show theoretically that the resulting DS coordinates the motion of the robots with each other and with the object, while the system remains stable. We validate our approach on a dual-arm robotic system and demonstrate that it can re-synchronize and adapt the motion of each arm in synchrony in a fraction of seconds, even when the motion of the object is fast and not accurately predictable.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2015\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n An Elicitation Study on Gesture Preferences and Memorability Toward a Practical Hand-Gesture Vocabulary for Smart Televisions.\n \n \n \n\n\n \n Dong, H.; Danesh, A.; Figueroa, N.; and El Saddik, A.\n\n\n \n\n\n\n IEEE Access, 3: 543-555. 2015.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@ARTICLE{7106566,\n  author={{Dong}, H. and {Danesh}, A. and {Figueroa}, N. and El Saddik, Abdulmotaleb},\n  journal={IEEE Access}, \n  title={An Elicitation Study on Gesture Preferences and Memorability Toward a Practical Hand-Gesture Vocabulary for Smart Televisions}, \n  year={2015},\n  volume={3},\n  number={},\n  pages={543-555},\n  doi={10.1109/ACCESS.2015.2432679}, \n  abstract={With the introduction of new depth-sensing technologies, interactive hand-gesture devices (such as smart televisions and displays) have been rapidly emerging. However, given the lack of a common vocabulary, most hand-gesture control commands are device-specific, burdening the user into learning different vocabularies for different devices. In order for hand gestures to become a natural communication for users with interactive devices, a standardized interactive hand-gesture vocabulary is necessary. Recently, researchers have approached this issue by conducting studies that elicit gesture vocabularies based on users' preferences. Nonetheless, a universal vocabulary has yet to be proposed. In this paper, a thorough design methodology for achieving such a universal hand-gesture vocabulary is presented. The methodology is derived from the work of Wobbrock et al. and includes four steps: 1) a preliminary survey eliciting users' attitudes; 2) a broader user survey in order to construct the universal vocabulary via results of the preliminary survey; 3) an evaluation test to study the implementation of the vocabulary; and 4) a memory test to analyze the memorability of the vocabulary. The proposed vocabulary emerged from this methodology achieves an agreement score exceeding those of the existing studies. Moreover, the results of the memory test show that, within a 15-min training session, the average accuracy of the proposed vocabulary is 90.71%. 
Despite the size of the proposed gesture vocabulary being smaller than that of similar work, it shares the same functionality, is easier to remember and can be integrated with smart TVs, interactive digital displays, and so on.}}\n\n
\n
\n\n\n
\n With the introduction of new depth-sensing technologies, interactive hand-gesture devices (such as smart televisions and displays) have been rapidly emerging. However, given the lack of a common vocabulary, most hand-gesture control commands are device-specific, burdening the user into learning different vocabularies for different devices. In order for hand gestures to become a natural communication for users with interactive devices, a standardized interactive hand-gesture vocabulary is necessary. Recently, researchers have approached this issue by conducting studies that elicit gesture vocabularies based on users' preferences. Nonetheless, a universal vocabulary has yet to be proposed. In this paper, a thorough design methodology for achieving such a universal hand-gesture vocabulary is presented. The methodology is derived from the work of Wobbrock et al. and includes four steps: 1) a preliminary survey eliciting users' attitudes; 2) a broader user survey in order to construct the universal vocabulary via results of the preliminary survey; 3) an evaluation test to study the implementation of the vocabulary; and 4) a memory test to analyze the memorability of the vocabulary. The proposed vocabulary emerged from this methodology achieves an agreement score exceeding those of the existing studies. Moreover, the results of the memory test show that, within a 15-min training session, the average accuracy of the proposed vocabulary is 90.71%. Despite the size of the proposed gesture vocabulary being smaller than that of similar work, it shares the same functionality, is easier to remember and can be integrated with smart TVs, interactive digital displays, and so on.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Combined Approach Toward Consistent Reconstructions of Indoor Spaces Based on 6D RGB-D Odometry and KinectFusion.\n \n \n \n \n\n\n \n Figueroa, N.; Dong, H.; and El Saddik, A.\n\n\n \n\n\n\n ACM Trans. Intell. Syst. Technol., 6(2). March 2015.\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{10.1145/2629673, author = {Figueroa, Nadia and Dong, Haiwei and El Saddik, Abdulmotaleb}, title = {A Combined Approach Toward Consistent Reconstructions of Indoor Spaces Based on 6D RGB-D Odometry and KinectFusion}, year = {2015}, issue_date = {May 2015}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, volume = {6}, number = {2}, issn = {2157-6904}, url = {https://doi.org/10.1145/2629673}, doi = {10.1145/2629673}, abstract = {We propose a 6D RGB-D odometry approach that finds the relative camera pose between consecutive RGB-D frames by keypoint extraction and feature matching both on the RGB and depth image planes. Furthermore, we feed the estimated pose to the highly accurate KinectFusion algorithm, which uses a fast ICP (Iterative Closest Point) to fine-tune the frame-to-frame relative pose and fuse the depth data into a global implicit surface. We evaluate our method on a publicly available RGB-D SLAM benchmark dataset by Sturm et al. The experimental results show that our proposed reconstruction method solely based on visual odometry and KinectFusion outperforms the state-of-the-art RGB-D SLAM system accuracy. Moreover, our algorithm outputs a ready-to-use polygon mesh (highly suitable for creating 3D virtual worlds) without any postprocessing steps.}, journal = {ACM Trans. Intell. Syst. Technol.}, month = mar, articleno = {14}, numpages = {10}, keywords = {Indoor mapping, kinect, benchmark datasets, evaluation} }\n\n
\n
\n\n\n
\n We propose a 6D RGB-D odometry approach that finds the relative camera pose between consecutive RGB-D frames by keypoint extraction and feature matching both on the RGB and depth image planes. Furthermore, we feed the estimated pose to the highly accurate KinectFusion algorithm, which uses a fast ICP (Iterative Closest Point) to fine-tune the frame-to-frame relative pose and fuse the depth data into a global implicit surface. We evaluate our method on a publicly available RGB-D SLAM benchmark dataset by Sturm et al. The experimental results show that our proposed reconstruction method solely based on visual odometry and KinectFusion outperforms the state-of-the-art RGB-D SLAM system accuracy. Moreover, our algorithm outputs a ready-to-use polygon mesh (highly suitable for creating 3D virtual worlds) without any postprocessing steps.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n ``Load balance''control for a humanoid musculoskeletal arm in table tennis movement.\n \n \n \n \n\n\n \n Dong, H.; Figueroa, N.; and El Saddik, A.\n\n\n \n\n\n\n International Journal of Control, Automation and Systems, 13(4): 887–896. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"``LoadPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{LoadBalance,\n  Abstract = {The aim of this paper is to propose a muscle control method for a humanoid musculoskeletal arm that mimics human muscle coordination from an engineering viewpoint. As muscle control is posed as a redundancy problem, the resulting muscle force corresponds to different desired control criteria (e.g., minimizing the total metabolic energy, minimizing muscle activity). In this research, the criterion we choose is ``load balance'', which is essential for reducing actuator demands on systems subject to repetitive and demanding tasks. In order to achieve a balanced force distribution throughout the muscles, we obtain a minimum-load muscle force by considering the acceleration contribution in both muscle and joint space. The orthogonal space of the minimum-load muscle force solution is designed to place the muscle force close to the midpoint of the muscle force limits. The proposed method is tested by tracking a table tennis movement of a real human subject. The results not only provide an explanation on the concepts of ``minimum-load''and ``balance-load'', but also show that the proposed method has advantageous properties such as computational efficiency and stability.},\n  Author = {Dong, Haiwei and Figueroa, Nadia and El Saddik, Abdulmotaleb},\n  Da = {2015/08/01},\n  Doi = {10.1007/s12555-014-0038-z},\n  Id = {Dong2015},\n  Isbn = {2005-4092},\n  Journal = {International Journal of Control, Automation and Systems},\n  Number = {4},\n  Pages = {887--896},\n  Title = {``Load balance''control for a humanoid musculoskeletal arm in table tennis movement},\n  Ty = {JOUR},\n  Url = {https://doi.org/10.1007/s12555-014-0038-z},\n  Volume = {13},\n  Year = {2015}}\n\n
\n
\n\n\n
\n The aim of this paper is to propose a muscle control method for a humanoid musculoskeletal arm that mimics human muscle coordination from an engineering viewpoint. As muscle control is posed as a redundancy problem, the resulting muscle force corresponds to different desired control criteria (e.g., minimizing the total metabolic energy, minimizing muscle activity). In this research, the criterion we choose is ``load balance'', which is essential for reducing actuator demands on systems subject to repetitive and demanding tasks. In order to achieve a balanced force distribution throughout the muscles, we obtain a minimum-load muscle force by considering the acceleration contribution in both muscle and joint space. The orthogonal space of the minimum-load muscle force solution is designed to place the muscle force close to the midpoint of the muscle force limits. The proposed method is tested by tracking a table tennis movement of a real human subject. The results not only provide an explanation on the concepts of ``minimum-load''and ``balance-load'', but also show that the proposed method has advantageous properties such as computational efficiency and stability.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive “load-distributed” muscle coordination method for kinematically redundant musculoskeletal humanoid systems.\n \n \n \n \n\n\n \n Dong, H.; Figueroa, N.; and El Saddik, A.\n\n\n \n\n\n\n Robotics and Autonomous Systems, 64: 59 - 69. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"AdaptivePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{DONG201559,\ntitle = "Adaptive “load-distributed” muscle coordination method for kinematically redundant musculoskeletal humanoid systems",\njournal = "Robotics and Autonomous Systems",\nvolume = "64",\npages = "59 - 69",\nyear = "2015",\nissn = "0921-8890",\ndoi = "https://doi.org/10.1016/j.robot.2014.10.020",\nurl = "http://www.sciencedirect.com/science/article/pii/S0921889014002413",\nauthor = "Haiwei Dong and Nadia Figueroa and Abdulmotaleb {El Saddik}",\nkeywords = "Tendon-driven system, Redundancy solution, Adaptive parameter updating",\nabstract = "Muscle force control of musculoskeletal humanoid systems has been studied for years in the motor control, biomechanics and robotics disciplines. However, the study of “load-distributed” muscle control has seldomly been contemplated. In this paper, we consider muscle force control as a problem of muscle coordination. We propose a general muscle coordination method for a system driven by agonist and antagonist muscles. In our method, a set of linear equations is derived by connecting the acceleration description in both joint and muscle space where the pseudo inverse solution to these equations provides an initial optimal muscle force distribution. Thereafter, we redistribute the forces throughout the muscles by deriving a gradient direction for muscle force. This allows the muscles to satisfy force constraints and generate a distribution of forces throughout all the muscles. Moreover, to ensure that our proposed method is adaptive to modeling errors, we have constructed an estimated system model, which is added to the system to represent the real plant. By updating the parameters of the estimated model based on prediction error, the estimated model approaches the real plant gradually in real-time. The overall proposed method is evaluated on a bending–stretching movement of a musculoskeletal arm. We used two models (arm with 6 and 10 muscles) to verify the method. 
The force distribution analysis verifies the “load-distribution” property of the computed muscle force. The efficiency comparison shows that the computational time does not increase significantly with the increase of muscle number. The tracking error statistics of the two models show the validity of the method."\n}\n\n\n
\n
\n\n\n
\n Muscle force control of musculoskeletal humanoid systems has been studied for years in the motor control, biomechanics and robotics disciplines. However, the study of “load-distributed” muscle control has seldomly been contemplated. In this paper, we consider muscle force control as a problem of muscle coordination. We propose a general muscle coordination method for a system driven by agonist and antagonist muscles. In our method, a set of linear equations is derived by connecting the acceleration description in both joint and muscle space where the pseudo inverse solution to these equations provides an initial optimal muscle force distribution. Thereafter, we redistribute the forces throughout the muscles by deriving a gradient direction for muscle force. This allows the muscles to satisfy force constraints and generate a distribution of forces throughout all the muscles. Moreover, to ensure that our proposed method is adaptive to modeling errors, we have constructed an estimated system model, which is added to the system to represent the real plant. By updating the parameters of the estimated model based on prediction error, the estimated model approaches the real plant gradually in real-time. The overall proposed method is evaluated on a bending–stretching movement of a musculoskeletal arm. We used two models (arm with 6 and 10 muscles) to verify the method. The force distribution analysis verifies the “load-distribution” property of the computed muscle force. The efficiency comparison shows that the computational time does not increase significantly with the increase of muscle number. The tracking error statistics of the two models show the validity of the method.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Elicitation Study on Gesture Attitudes and Preferences Towards an Interactive Hand-Gesture Vocabulary.\n \n \n \n \n\n\n \n Dong, H.; Figueroa, N.; and El Saddik, A.\n\n\n \n\n\n\n In Proceedings of the 23rd ACM International Conference on Multimedia, of MM '15, pages 999–1002, New York, NY, USA, 2015. Association for Computing Machinery\n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{10.1145/2733373.2806385,\nauthor = {Dong, Haiwei and Figueroa, Nadia and El Saddik, Abdulmotaleb},\ntitle = {An Elicitation Study on Gesture Attitudes and Preferences Towards an Interactive Hand-Gesture Vocabulary},\nyear = {2015},\nisbn = {9781450334594},\npublisher = {Association for Computing Machinery},\naddress = {New York, NY, USA},\nurl = {https://doi.org/10.1145/2733373.2806385},\ndoi = {10.1145/2733373.2806385},\nabstract = {With the introduction of new depth sensing technologies, interactive hand-gesture devices are rapidly emerging. However, the hand-gestures used in these devices do not follow a common vocabulary, making certain control command device-specific. In this paper we present an initial effort to create a standardized interactive hand-gesture vocabulary for the next generation of television applications. We conduct a user-elicitation study using a survey in order to define a common vocabulary for specific control commands, such as Volume up/down, Menu open/close, etc. This survey is entirely user-oriented and thus it has two phases. In the first phase, we ask open questions about specific commands. In the second phase, we use the answers suggested from the first phase to create a multiple choice questionnaire. Based on the results from the survey, we study the gesture attitudes and preferences between gender groups, and between age groups with a quantitative and qualitative statistical analysis. Finally, the hand-gesture vocabulary is derived after applying an agreement analysis on the user-elicited gestures. 
The proposed methodology for gesture set design is comparable with existing methodologies and yields higher agreement levels than relevant user-elicited studies in the field.},\nbooktitle = {Proceedings of the 23rd ACM International Conference on Multimedia},\npages = {999–1002},\nnumpages = {4},\nkeywords = {hand-gesture interaction, kinect, preferences and attitudes},\nlocation = {Brisbane, Australia},\nseries = {MM '15}\n}\n\n
\n
\n\n\n
\n With the introduction of new depth sensing technologies, interactive hand-gesture devices are rapidly emerging. However, the hand-gestures used in these devices do not follow a common vocabulary, making certain control command device-specific. In this paper we present an initial effort to create a standardized interactive hand-gesture vocabulary for the next generation of television applications. We conduct a user-elicitation study using a survey in order to define a common vocabulary for specific control commands, such as Volume up/down, Menu open/close, etc. This survey is entirely user-oriented and thus it has two phases. In the first phase, we ask open questions about specific commands. In the second phase, we use the answers suggested from the first phase to create a multiple choice questionnaire. Based on the results from the survey, we study the gesture attitudes and preferences between gender groups, and between age groups with a quantitative and qualitative statistical analysis. Finally, the hand-gesture vocabulary is derived after applying an agreement analysis on the user-elicited gestures. The proposed methodology for gesture set design is comparable with existing methodologies and yields higher agreement levels than relevant user-elicited studies in the field.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2014\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Towards consistent reconstructions of indoor spaces based on 6D RGB-D odometry and KinectFusion.\n \n \n \n\n\n \n Dong, H.; Figueroa, N.; and El Saddik, A.\n\n\n \n\n\n\n In 2014 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 1796-1803, 2014. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{6942798,\n  author={H. {Dong} and N. {Figueroa} and A. {El Saddik}},\n  booktitle={2014 IEEE/RSJ International Conference on Intelligent Robots and Systems}, \n  title={Towards consistent reconstructions of indoor spaces based on 6D RGB-D odometry and KinectFusion}, \n  year={2014},\n  volume={},\n  number={},\n  pages={1796-1803},\n  doi={10.1109/IROS.2014.6942798}, \n  abstract={We focus on generating consistent reconstructions of indoor spaces from a freely moving handheld RGB-D sensor, with the aim of creating virtual models that can be used for measuring and remodeling. We propose a novel 6D RGBD odometry approach that finds the relative camera pose between consecutive RGB-D frames by keypoint extraction and feature matching both on the RGB and depth image planes. Furthermore, we feed the estimated pose to the highly accurate KinectFusion algorithm, which uses a fast ICP (Iterative-Closest-Point) to fine-tune the frame-to-frame relative pose and fuse the Depth data into a global implicit surface. We evaluate our method on a publicly available RGB-D SLAM benchmark dataset by Sturm et al. The experimental results show that our proposed reconstruction method solely based on visual odometry and KinectFusion outperforms the state-of-the-art RGB-D SLAM system accuracy. Our algorithm outputs a ready-to-use polygon mesh (highly suitable for creating 3D virtual worlds) without any post-processing steps.}}\n\n
\n
\n\n\n
\n We focus on generating consistent reconstructions of indoor spaces from a freely moving handheld RGB-D sensor, with the aim of creating virtual models that can be used for measuring and remodeling. We propose a novel 6D RGBD odometry approach that finds the relative camera pose between consecutive RGB-D frames by keypoint extraction and feature matching both on the RGB and depth image planes. Furthermore, we feed the estimated pose to the highly accurate KinectFusion algorithm, which uses a fast ICP (Iterative-Closest-Point) to fine-tune the frame-to-frame relative pose and fuse the Depth data into a global implicit surface. We evaluate our method on a publicly available RGB-D SLAM benchmark dataset by Sturm et al. The experimental results show that our proposed reconstruction method solely based on visual odometry and KinectFusion outperforms the state-of-the-art RGB-D SLAM system accuracy. Our algorithm outputs a ready-to-use polygon mesh (highly suitable for creating 3D virtual worlds) without any post-processing steps.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Towards whole body fatigue assessment of human movement: a fatigue-tracking system based on combined sEMG and accelerometer signals.\n \n \n \n \n\n\n \n Dong, H.; Ugalde, I.; Figueroa, N.; and El Saddik, A.\n\n\n \n\n\n\n Sensors (Basel, Switzerland), 14(2): 2052–2070. 01 2014.\n \n\n\n\n
\n\n\n\n \n \n \"TowardsPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@article{Fatigue,\n  Abstract = {This paper proposes a method to assess the overall fatigue of human body movement. First of all, according to previous research regarding localized muscular fatigue, a linear relation is assumed between the mean frequency and the muscular working time when the muscle is experiencing fatigue. This assumption is verified with a rigorous statistical analysis. Based on this proven linearity, localized muscular fatigue is simplified as a linear model. Furthermore, localized muscular fatigue is considered a dynamic process and, hence, the localized fatigue levels are tracked by updating the parameters with the most current surface electromyogram (sEMG) measurements. Finally, an overall fatigue level is computed by fusing localized muscular fatigue levels. The developed fatigue-tracking system is evaluated with two fatigue experiments (in which 10 male subjects and seven female subjects participated), including holding self-weight (dip start position training) and lifting weight with one arm (arm curl training).},\n  An = {24473280},\n  Author = {Dong, Haiwei and Ugalde, Izaskun and Figueroa, Nadia and El Saddik, Abdulmotaleb},\n  Db = {PubMed},\n  Doi = {10.3390/s140202052},\n  Isbn = {1424-8220},\n  Journal = {Sensors (Basel, Switzerland)},\n  Keywords = {Adult; Analysis of Variance; Body Mass Index; Electromyography; Female; Humans; Male; Muscle Fatigue/*physiology; Wireless Technology},\n  La = {eng},\n  Month = {01},\n  Number = {2},\n  Pages = {2052--2070},\n  Publisher = {Molecular Diversity Preservation International (MDPI)},\n  Title = {Towards whole body fatigue assessment of human movement: a fatigue-tracking system based on combined sEMG and accelerometer signals},\n  Url = {https://pubmed.ncbi.nlm.nih.gov/24473280},\n  Volume = {14},\n  Year = {2014},\n  }\n\n
\n
\n\n\n
\n This paper proposes a method to assess the overall fatigue of human body movement. First of all, according to previous research regarding localized muscular fatigue, a linear relation is assumed between the mean frequency and the muscular working time when the muscle is experiencing fatigue. This assumption is verified with a rigorous statistical analysis. Based on this proven linearity, localized muscular fatigue is simplified as a linear model. Furthermore, localized muscular fatigue is considered a dynamic process and, hence, the localized fatigue levels are tracked by updating the parameters with the most current surface electromyogram (sEMG) measurements. Finally, an overall fatigue level is computed by fusing localized muscular fatigue levels. The developed fatigue-tracking system is evaluated with two fatigue experiments (in which 10 male subjects and seven female subjects participated), including holding self-weight (dip start position training) and lifting weight with one arm (arm curl training).\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Contextual object category recognition for RGB-D scene labeling.\n \n \n \n \n\n\n \n Ali, H.; Shafait, F.; Giannakidou, E.; Vakali, A.; Figueroa, N.; Varvadoukas, T.; and Mavridis, N.\n\n\n \n\n\n\n Robotics and Autonomous Systems, 62(2): 241 - 256. 2014.\n \n\n\n\n
\n\n\n\n \n \n \"ContextualPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{ALI2014241,\ntitle = "Contextual object category recognition for RGB-D scene labeling",\njournal = "Robotics and Autonomous Systems",\nvolume = "62",\nnumber = "2",\npages = "241 - 256",\nyear = "2014",\nissn = "0921-8890",\ndoi = "https://doi.org/10.1016/j.robot.2013.10.001",\nurl = "http://www.sciencedirect.com/science/article/pii/S0921889013001929",\nauthor = "Haider Ali and Faisal Shafait and Eirini Giannakidou and Athena Vakali and Nadia Figueroa and Theodoros Varvadoukas and Nikolaos Mavridis",\nkeywords = "Object recognition, Contextual modeling, RGB-D scenes, Social media, 3D scene labeling",\nabstract = "Recent advances in computer vision on the one hand, and imaging technologies on the other hand, have opened up a number of interesting possibilities for robust 3D scene labeling. This paper presents contributions in several directions to improve the state-of-the-art in RGB-D scene labeling. First, we present a novel combination of depth and color features to recognize different object categories in isolation. Then, we use a context model that exploits detection results of other objects in the scene to jointly optimize labels of co-occurring objects in the scene. Finally, we investigate the use of social media mining to develop the context model, and provide an investigation of its convergence. We perform thorough experimentation on both the publicly available RGB-D Dataset from the University of Washington as well as on the NYU scene dataset. An analysis of the results shows interesting insights about contextual object category recognition, and its benefits."\n}\n\n\n
\n
\n\n\n
\n Recent advances in computer vision on the one hand, and imaging technologies on the other hand, have opened up a number of interesting possibilities for robust 3D scene labeling. This paper presents contributions in several directions to improve the state-of-the-art in RGB-D scene labeling. First, we present a novel combination of depth and color features to recognize different object categories in isolation. Then, we use a context model that exploits detection results of other objects in the scene to jointly optimize labels of co-occurring objects in the scene. Finally, we investigate the use of social media mining to develop the context model, and provide an investigation of its convergence. We perform thorough experimentation on both the publicly available RGB-D Dataset from the University of Washington as well as on the NYU scene dataset. An analysis of the results shows interesting insights about contextual object category recognition, and its benefits.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n “Anti-fatigue” control for over-actuated bionic arm with muscle force constraints.\n \n \n \n\n\n \n Dong, H.; Yazdkhasti, S.; Figueroa, N.; and Saddik, A. E.\n\n\n \n\n\n\n In 2013 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 335-342, 2013. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{6696373,\n  author={H. {Dong} and S. {Yazdkhasti} and N. {Figueroa} and A. E. {Saddik}},\n  booktitle={2013 IEEE/RSJ International Conference on Intelligent Robots and Systems}, \n  title={“Anti-fatigue” control for over-actuated bionic arm with muscle force constraints}, \n  year={2013},\n  volume={},\n  number={},\n  pages={335-342},\n  doi={10.1109/IROS.2013.6696373}, \n  abstract={In this paper, we propose an “anti-fatigue” control method for bionic actuated systems. Specifically, the proposed method is illustrated on an over-actuated bionic arm. Our control method consists of two steps. In the first step, a set of linear equations is derived by connecting the acceleration description in both joint and muscle space. The pseudo inverse solution to these equations provides an initial optimal muscle force distribution. As a second step, we derive a gradient direction for muscle force redistribution. This allows the muscles to satisfy force constraints and generate an even distribution of forces throughout all the muscles (i.e. towards "anti-fatigue"). The overall proposed method is tested for a bending-stretching movement. We used two models (bionic arm with 6 and 10 muscles) to verify the method. The force distribution analysis verifies the “anti-fatigue” property of the computed muscle force. The efficiency comparison shows that the computational time does not increase significantly with the increase of muscle number. The tracking error statistics of the two models show the validity of the method.}}\n\n\n
\n
\n\n\n
\n In this paper, we propose an “anti-fatigue” control method for bionic actuated systems. Specifically, the proposed method is illustrated on an over-actuated bionic arm. Our control method consists of two steps. In the first step, a set of linear equations is derived by connecting the acceleration description in both joint and muscle space. The pseudo inverse solution to these equations provides an initial optimal muscle force distribution. As a second step, we derive a gradient direction for muscle force redistribution. This allows the muscles to satisfy force constraints and generate an even distribution of forces throughout all the muscles (i.e. towards \"anti-fatigue\"). The overall proposed method is tested for a bending-stretching movement. We used two models (bionic arm with 6 and 10 muscles) to verify the method. The force distribution analysis verifies the “anti-fatigue” property of the computed muscle force. The efficiency comparison shows that the computational time does not increase significantly with the increase of muscle number. The tracking error statistics of the two models show the validity of the method.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Muscle Force Control of a Kinematically Redundant Bionic Arm with Real-Time Parameter Update.\n \n \n \n\n\n \n Dong, H.; Figueroa, N.; and Saddik, A. E.\n\n\n \n\n\n\n In 2013 IEEE International Conference on Systems, Man, and Cybernetics, pages 1640-1647, 2013. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{6722036,\n  author={H. {Dong} and N. {Figueroa} and A. E. {Saddik}},\n  booktitle={2013 IEEE International Conference on Systems, Man, and Cybernetics}, \n  title={Muscle Force Control of a Kinematically Redundant Bionic Arm with Real-Time Parameter Update}, \n  year={2013},\n  volume={},\n  number={},\n  pages={1640-1647},\n  doi={10.1109/SMC.2013.283},\n  abstract={Redundant muscle-driven arms have numerous advantages, such as increased robustness, ability for load distribution, impedance change etc. However, controlling such a muscle-driven arm is a difficult task. This is mainly due to its redundancy, specially when the muscle force is required to follow certain output constraints and fulfill optimization objectives. In this paper, a new method for controlling such muscle-like systems is proposed. By considering both joint and muscle acceleration contributions, a set of linear equations was constructed. Driving muscle activation is thus framed as the only unknown vector. To solve this linear equation set, a pseudo-inverse solution was used. The null space within this solution represents the internal force, which was used to evenly distribute the muscle forces, which is considered as "anti-fatigue" way. Moreover, to make the proposed method adaptive to modeling errors, an estimated system model was added to represent the real model. By updating the parameters of the estimated model based on prediction error, the estimated model approaches the real model gradually in real time. The overall method was tested for the case of a bending-stretching movement. The presented results verify the validity of the method, and illustrate its useful features and advantages.}\n  }\n\n
\n
\n\n\n
\n Redundant muscle-driven arms have numerous advantages, such as increased robustness, ability for load distribution, impedance change etc. However, controlling such a muscle-driven arm is a difficult task. This is mainly due to its redundancy, specially when the muscle force is required to follow certain output constraints and fulfill optimization objectives. In this paper, a new method for controlling such muscle-like systems is proposed. By considering both joint and muscle acceleration contributions, a set of linear equations was constructed. Driving muscle activation is thus framed as the only unknown vector. To solve this linear equation set, a pseudo-inverse solution was used. The null space within this solution represents the internal force, which was used to evenly distribute the muscle forces, which is considered as \"anti-fatigue\" way. Moreover, to make the proposed method adaptive to modeling errors, an estimated system model was added to represent the real model. By updating the parameters of the estimated model based on prediction error, the estimated model approaches the real model gradually in real time. The overall method was tested for the case of a bending-stretching movement. The presented results verify the validity of the method, and illustrate its useful features and advantages.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n From Sense to Print: Towards Automatic 3D Printing from 3D Sensing Devices.\n \n \n \n\n\n \n Figueroa, N.; Dong, H.; and Saddik, A. E.\n\n\n \n\n\n\n In 2013 IEEE International Conference on Systems, Man, and Cybernetics, pages 4897-4904, 2013. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{6722588,\n  author={N. {Figueroa} and H. {Dong} and A. E. {Saddik}},\n  booktitle={2013 IEEE International Conference on Systems, Man, and Cybernetics}, \n  title={From Sense to Print: Towards Automatic 3D Printing from 3D Sensing Devices}, \n  year={2013},\n  volume={},\n  number={},\n  pages={4897-4904},\n  doi={10.1109/SMC.2013.833}, \n  abstract={In this paper, we introduce the From Sense to Print system. It is a system where a 3D sensing device connected to the cloud is used to reconstruct an object or a human and generate 3D CAD models which are sent automatically to a 3D printer. In other words, we generate ready-to-print 3D models of objects without manual intervention in the processing pipeline. Our proposed system is validated with an experimental prototype using the Kinect sensor as the 3D sensing device, the KinectFusion algorithm as our reconstruction algorithm and a fused deposition modeling (FDM) 3D printer. In order for the pipeline to be automatic, we propose a semantic segmentation algorithm applied to the 3D reconstructed object, based on the tracked camera poses obtained from the reconstruction phase. The segmentation algorithm works with both inanimate objects lying on a table/floor or with humans. Furthermore, we automatically scale the model to fit in the maximum volume of the 3D printer at hand. Finally, we present initial results from our experimental prototype and discuss the current limitations.}\n  }\n\n
\n
\n\n\n
\n In this paper, we introduce the From Sense to Print system. It is a system where a 3D sensing device connected to the cloud is used to reconstruct an object or a human and generate 3D CAD models which are sent automatically to a 3D printer. In other words, we generate ready-to-print 3D models of objects without manual intervention in the processing pipeline. Our proposed system is validated with an experimental prototype using the Kinect sensor as the 3D sensing device, the KinectFusion algorithm as our reconstruction algorithm and a fused deposition modeling (FDM) 3D printer. In order for the pipeline to be automatic, we propose a semantic segmentation algorithm applied to the 3D reconstructed object, based on the tracked camera poses obtained from the reconstruction phase. The segmentation algorithm works with both inanimate objects lying on a table/floor or with humans. Furthermore, we automatically scale the model to fit in the maximum volume of the 3D printer at hand. Finally, we present initial results from our experimental prototype and discuss the current limitations.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Joint origin identification of articulated robots with marker-based multi-camera optical tracking systems.\n \n \n \n \n\n\n \n Figueroa, N. B.; Schmidt, F.; Ali, H.; and Mavridis, N.\n\n\n \n\n\n\n Robotics and Autonomous Systems, 61(6): 580 - 592. 2013.\n \n\n\n\n
\n\n\n\n \n \n \"JointPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{FIGUEROA2013580,\ntitle = "Joint origin identification of articulated robots with marker-based multi-camera optical tracking systems",\njournal = "Robotics and Autonomous Systems",\nvolume = "61",\nnumber = "6",\npages = "580 - 592",\nyear = "2013",\nissn = "0921-8890",\ndoi = "https://doi.org/10.1016/j.robot.2013.02.008",\nurl = "http://www.sciencedirect.com/science/article/pii/S0921889013000444",\nauthor = "Nadia B. Figueroa and Florian Schmidt and Haider Ali and Nikolaos Mavridis",\nkeywords = "Joint identification, Marker-based multi-camera optical tracking system, Calibration, Articulated robots",\nabstract = "Marker-based multi-camera optical tracking systems are being used in the robotics field to track robots for validation, verification, and calibration of their kinematic and dynamic models. These tracking systems estimate the pose of tracking bodies attached to objects within a tracking volume. In this work, we explore the case of tracking the origins of joints of articulated robots when the tracking bodies are mounted on limbs or structures relative to the joints. This configuration leads to an unknown relative pose between the tracking body and the joint origin. The identification of this relative pose is essential for an accurate representation of the kinematic model. We propose an approach for the identification of the origin of joints relative to tracking bodies by using state-of-the-art center of rotation (CoR) and axis of rotation (AoR) estimation methods. The applicability and effectiveness of our approach is demonstrated in two successful case studies: (i) the verification of the upper body kinematics of DLR’s humanoid Rollin’ Justin and (ii) the identification of the kinematic parameters of an ST Robot arm relative to its environment for the embodiment of a situated conversational assistant."\n}\n\n
\n
\n\n\n
\n Marker-based multi-camera optical tracking systems are being used in the robotics field to track robots for validation, verification, and calibration of their kinematic and dynamic models. These tracking systems estimate the pose of tracking bodies attached to objects within a tracking volume. In this work, we explore the case of tracking the origins of joints of articulated robots when the tracking bodies are mounted on limbs or structures relative to the joints. This configuration leads to an unknown relative pose between the tracking body and the joint origin. The identification of this relative pose is essential for an accurate representation of the kinematic model. We propose an approach for the identification of the origin of joints relative to tracking bodies by using state-of-the-art center of rotation (CoR) and axis of rotation (AoR) estimation methods. The applicability and effectiveness of our approach is demonstrated in two successful case studies: (i) the verification of the upper body kinematics of DLR’s humanoid Rollin’ Justin and (ii) the identification of the kinematic parameters of an ST Robot arm relative to its environment for the embodiment of a situated conversational assistant.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Approaching behaviour monitor and vibration indication in developing a general moving object alarm system (GMOAS).\n \n \n \n\n\n \n Dong, H.; Giakoumidis, N.; Figueroa, N.; and Mavridis, N.\n\n\n \n\n\n\n International Journal of Advanced Robotic Systems, 10. 2013.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{Approaching,\ntitle = "Approaching behaviour monitor and vibration indication in developing a general moving object alarm system (GMOAS)",\nauthor = "Dong, Haiwei and Giakoumidis, Nikolas and Figueroa, Nadia and Mavridis, Nikolaos",\nyear = "2013",\ndoi = "10.5772/56586",\nvolume = "10",\njournal = "International Journal of Advanced Robotic Systems",\nissn = "1729-8806",\npublisher = "Vienna University of Technology",\n\nabstract = "People who suffer from hearing impairment caused by illness, age or extremely noisy environments are constantly in danger of being hit or knocked down by fast moving objects behind them when they have no companion or augmented sensory system to warn them. In this paper, we propose the General Moving Object Alarm System (GMOAS), a system focused on aiding the safe mobility of people under these circumstances. The GMOAS is a wearable haptic device that consists of two main subsystems: (i) a moving object monitoring subsystem that uses laser range data to detect and track approaching objects, and (ii) an alarm subsystem that warns the user of possibly dangerous approaching objects by triggering tactile vibrations on an {"}
\n
\n\n\n
\n People who suffer from hearing impairment caused by illness, age or extremely noisy environments are constantly in danger of being hit or knocked down by fast moving objects behind them when they have no companion or augmented sensory system to warn them. In this paper, we propose the General Moving Object Alarm System (GMOAS), a system focused on aiding the safe mobility of people under these circumstances. The GMOAS is a wearable haptic device that consists of two main subsystems: (i) a moving object monitoring subsystem that uses laser range data to detect and track approaching objects, and (ii) an alarm subsystem that warns the user of possibly dangerous approaching objects by triggering tactile vibrations on an \n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2012\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n 3D Registration for Verification of Humanoid Justin's Upper Body Kinematics.\n \n \n \n\n\n \n Figueroa, N.; Ali, H.; and Schmidt, F.\n\n\n \n\n\n\n In 2012 Ninth Conference on Computer and Robot Vision, pages 276-283, 2012. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{6233152,\n  author={N. {Figueroa} and H. {Ali} and F. {Schmidt}},\n  booktitle={2012 Ninth Conference on Computer and Robot Vision}, \n  title={3D Registration for Verification of Humanoid Justin's Upper Body Kinematics}, \n  year={2012},\n  volume={},\n  number={},\n  pages={276-283},\n  doi={10.1109/CRV.2012.43},\n  abstract={Humanoid robots such as DLR's Justin are built with light-weight structures and flexible mechanical components. These generate positioning errors at the TCP (Tool-Center-Point) end-pose of the hand. The identification of these errors is essential for object manipulation and path planning. We proposed a verification routine to identify the bounds of the TCP end-pose errors by using the on-board stereo vision system. It involves estimating the pose of 3D point clouds of Justin's hand by using state-of-the-art 3D registration techniques. Partial models of the hand were generated by registering subsets of overlapping 3D point clouds. We proposed a method for the selection of overlapping point clouds of self-occluding objects (Justin's hand). It is based on a statistical analysis of the depth values. We applied an extended metaview registration method to the resulting subset of point clouds. The partial models were evaluated with detailed based surface consistency measures. The TCP end-pose errors estimated by using our method are consistent with ground-truth errors.}}\n\n\n\n%% Workshops %%\n\n
\n
\n\n\n
\n Humanoid robots such as DLR's Justin are built with light-weight structures and flexible mechanical components. These generate positioning errors at the TCP (Tool-Center-Point) end-pose of the hand. The identification of these errors is essential for object manipulation and path planning. We proposed a verification routine to identify the bounds of the TCP end-pose errors by using the on-board stereo vision system. It involves estimating the pose of 3D point clouds of Justin's hand by using state-of-the-art 3D registration techniques. Partial models of the hand were generated by registering subsets of overlapping 3D point clouds. We proposed a method for the selection of overlapping point clouds of self-occluding objects (Justin's hand). It is based on a statistical analysis of the depth values. We applied an extended metaview registration method to the resulting subset of point clouds. The partial models were evaluated with detailed based surface consistency measures. The TCP end-pose errors estimated by using our method are consistent with ground-truth errors.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Segmentation and Pose Estimation of Planar Metallic Objects.\n \n \n \n\n\n \n Ali, H.; and Figueroa, N.\n\n\n \n\n\n\n In 2012 Ninth Conference on Computer and Robot Vision, pages 376-382, 2012. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@INPROCEEDINGS{6233165,\n  author={H. {Ali} and N. {Figueroa}},\n  booktitle={2012 Ninth Conference on Computer and Robot Vision}, \n  title={Segmentation and Pose Estimation of Planar Metallic Objects}, \n  year={2012},\n  volume={},\n  number={},\n  pages={376-382},\n  doi={10.1109/CRV.2012.56}, \n  abstract={The problem of estimating the pose of metallic objects with shiny surfaces is studied. A new application has been developed using state-of-the-art 3D object segmentation (euclidean clustering) and pose estimation (ICP) methods. We analyze the planar surfaces of the metallic objects in 3D laser scanner data. First we segment these planar objects using euclidean clustering based on surface normals. Thereafter to estimate the pose of these segmented objects we compute Fast Point Feature Histograms (FPFH) descriptors. Finally we use an ICP algorithm that computes the rigid transformation with Singular Value Decomposition(SVD). Two different round of experiments are conducted:-one for the clustering and the other one for the pose estimation. We present the experimental results and analysis along with the possible application scenario and future work.}}\n\n\n
\n
\n\n\n
\n The problem of estimating the pose of metallic objects with shiny surfaces is studied. A new application has been developed using state-of-the-art 3D object segmentation (euclidean clustering) and pose estimation (ICP) methods. We analyze the planar surfaces of the metallic objects in 3D laser scanner data. First we segment these planar objects using euclidean clustering based on surface normals. Thereafter to estimate the pose of these segmented objects we compute Fast Point Feature Histograms (FPFH) descriptors. Finally we use an ICP algorithm that computes the rigid transformation with Singular Value Decomposition(SVD). Two different round of experiments are conducted:-one for the clustering and the other one for the pose estimation. We present the experimental results and analysis along with the possible application scenario and future work.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);